1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
66#include "efivar.h"
67#include "platform.h"
68#include "aspm.h"
69
70#define NUM_IB_PORTS 1
71
72uint kdeth_qp;
73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77module_param(num_vls, uint, S_IRUGO);
78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80/*
81 * Default time to aggregate two 10K packets from the idle state
82 * (timer not running). The timer starts at the end of the first packet,
83 * so only the time for one 10K packet and header plus a bit extra is needed.
84 * 10 * 1024 + 64 header bytes = 10304 bytes
85 * 10304 bytes / 12.5 GB/s = 824.32ns
86 */
87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88module_param(rcv_intr_timeout, uint, S_IRUGO);
89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
90
91uint rcv_intr_count = 16; /* same as qib */
92module_param(rcv_intr_count, uint, S_IRUGO);
93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95ushort link_crc_mask = SUPPORTED_CRCS;
96module_param(link_crc_mask, ushort, S_IRUGO);
97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99uint loopback;
100module_param_named(loopback, loopback, uint, S_IRUGO);
101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
102
103/* Other driver tunables */
104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
105static ushort crc_14b_sideband = 1;
106static uint use_flr = 1;
107uint quick_linkup; /* skip LNI */
108
109struct flag_table {
110 u64 flag; /* the flag */
111 char *str; /* description string */
112 u16 extra; /* extra information */
113 u16 unused0;
114 u32 unused1;
115};
116
117/* str must be a string constant */
118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
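/*
 * These tables are scanned against a raw error-status value to build a
 * human-readable string. A minimal sketch of that decode loop (the
 * driver's real helper also handles buffer length and separators):
 *
 *	for (i = 0; i < num_entries; i++)
 *		if (reg & table[i].flag)
 *			pr_info("%s\n", table[i].str);
 */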
120
121/* Send Error Consequences */
122#define SEC_WRITE_DROPPED 0x1
123#define SEC_PACKET_DROPPED 0x2
124#define SEC_SC_HALTED 0x4 /* per-context only */
125#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
126
127#define MIN_KERNEL_KCTXTS 2
128#define FIRST_KERNEL_KCTXT 1
129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
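/*
 * Illustrative use of SC2VL_VAL (a sketch, not code taken from this file;
 * SEND_SC2VLT0 is assumed to be the matching CSR name): an identity
 * SC0..SC7 -> VL0..VL7 mapping for table 0 could be written as
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0,
 *			    0, 0, 1, 1, 2, 2, 3, 3,
 *			    4, 4, 5, 5, 6, 6, 7, 7));
 */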
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242/*
243 * CCE Error flags.
244 */
245static struct flag_table cce_err_status_flags[] = {
246/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
248/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
250/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
252/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
254/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
256/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
258/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
260/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
262/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
266/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
268/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
270/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
272/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
274/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
276/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
278/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
280/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
282/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
283 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
284/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
286/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
288/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
290/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
292/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
294/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
296/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
298/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
300/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
302/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
304/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
306/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
308/*31*/ FLAG_ENTRY0("LATriggered",
309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
310/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
312/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
314/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
316/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
318/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
320/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
322/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
324/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
326/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
328/*41-63 reserved*/
329};
330
331/*
332 * Misc Error flags
333 */
334#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
335static struct flag_table misc_err_status_flags[] = {
336/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
337/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
338/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
339/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
340/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
341/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
342/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
343/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
344/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
345/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
346/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
347/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
348/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
349};
350
351/*
352 * TXE PIO Error flags and consequences
353 */
354static struct flag_table pio_err_status_flags[] = {
355/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
356 SEC_WRITE_DROPPED,
357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
358/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
359 SEC_SPC_FREEZE,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
361/* 2*/ FLAG_ENTRY("PioCsrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
364/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
367/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
370/* 5*/ FLAG_ENTRY("PioPccFifoParity",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
373/* 6*/ FLAG_ENTRY("PioPecFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
376/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
379/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
382/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
385/*10*/ FLAG_ENTRY("PioSmPktResetParity",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
388/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
391/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
394/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
395 0,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
397/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
400/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
403/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
406/*17*/ FLAG_ENTRY("PioInitSmIn",
407 0,
408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
409/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
412/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
415/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
416 0,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
418/*21*/ FLAG_ENTRY("PioWriteDataParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
421/*22*/ FLAG_ENTRY("PioStateMachine",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
424/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
425 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
427/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
428 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
430/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
431 SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
433/*26*/ FLAG_ENTRY("PioVlfSopParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
436/*27*/ FLAG_ENTRY("PioVlFifoParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
439/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
442/*29*/ FLAG_ENTRY("PioPpmcSopLen",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
445/*30-31 reserved*/
446/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
449/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
452/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
455/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
458/*36-63 reserved*/
459};
460
461/* TXE PIO errors that cause an SPC freeze */
462#define ALL_PIO_FREEZE_ERR \
463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
492
493/*
494 * TXE SDMA Error flags
495 */
496static struct flag_table sdma_err_status_flags[] = {
497/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
499/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
501/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
503/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
504 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
505/*04-63 reserved*/
506};
507
508/* TXE SDMA errors that cause an SPC freeze */
509#define ALL_SDMA_FREEZE_ERR \
510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
513
514/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
515#define PORT_DISCARD_EGRESS_ERRS \
516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
519
520/*
521 * TXE Egress Error flags
522 */
523#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
524static struct flag_table egress_err_status_flags[] = {
525/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
526/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
527/* 2 reserved */
528/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
530/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
531/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
532/* 6 reserved */
533/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
534 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
535/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
537/* 9-10 reserved */
538/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
540/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
541/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
542/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
543/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
544/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
545 SEES(TX_SDMA0_DISALLOWED_PACKET)),
546/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
547 SEES(TX_SDMA1_DISALLOWED_PACKET)),
548/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
549 SEES(TX_SDMA2_DISALLOWED_PACKET)),
550/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
551 SEES(TX_SDMA3_DISALLOWED_PACKET)),
552/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
553 SEES(TX_SDMA4_DISALLOWED_PACKET)),
554/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
555 SEES(TX_SDMA5_DISALLOWED_PACKET)),
556/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
557 SEES(TX_SDMA6_DISALLOWED_PACKET)),
558/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
559 SEES(TX_SDMA7_DISALLOWED_PACKET)),
560/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
561 SEES(TX_SDMA8_DISALLOWED_PACKET)),
562/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
563 SEES(TX_SDMA9_DISALLOWED_PACKET)),
564/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
565 SEES(TX_SDMA10_DISALLOWED_PACKET)),
566/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
567 SEES(TX_SDMA11_DISALLOWED_PACKET)),
568/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
569 SEES(TX_SDMA12_DISALLOWED_PACKET)),
570/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
571 SEES(TX_SDMA13_DISALLOWED_PACKET)),
572/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
573 SEES(TX_SDMA14_DISALLOWED_PACKET)),
574/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
575 SEES(TX_SDMA15_DISALLOWED_PACKET)),
576/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
578/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
580/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
582/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
584/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
586/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
588/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
590/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
592/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
594/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
595/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
596/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
597/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
598/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
599/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
600/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
601/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
602/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
603/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
604/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
605/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
606/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
607/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
608/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
609/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
610/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
611/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
612/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
613/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
614/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
615/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
617/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
619};
620
621/*
622 * TXE Egress Error Info flags
623 */
624#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
625static struct flag_table egress_err_info_flags[] = {
626/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
627/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
628/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
630/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
631/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
632/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
633/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
634/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
635/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
636/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
637/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
638/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
639/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
640/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
641/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
642/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
643/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
644/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
645/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
646/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
647/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
648};
649
650/* TXE Egress errors that cause an SPC freeze */
651#define ALL_TXE_EGRESS_FREEZE_ERR \
652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
656 | SEES(TX_LAUNCH_CSR_PARITY) \
657 | SEES(TX_SBRD_CTL_CSR_PARITY) \
658 | SEES(TX_CONFIG_PARITY) \
659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
668 | SEES(TX_CREDIT_RETURN_PARITY))
669
670/*
671 * TXE Send error flags
672 */
673#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
674static struct flag_table send_err_status_flags[] = {
675/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
676/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
677/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
678};
679
680/*
681 * TXE Send Context Error flags and consequences
682 */
683static struct flag_table sc_err_status_flags[] = {
684/* 0*/ FLAG_ENTRY("InconsistentSop",
685 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
687/* 1*/ FLAG_ENTRY("DisallowedPacket",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
690/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
691 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
693/* 3*/ FLAG_ENTRY("WriteOverflow",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
696/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
699/* 5-63 reserved*/
700};
701
702/*
703 * RXE Receive Error flags
704 */
705#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
706static struct flag_table rxe_err_status_flags[] = {
707/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
708/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
709/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
710/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
711/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
712/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
713/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
714/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
715/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
716/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
717/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
718/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
719/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
720/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
721/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
722/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
723/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
724 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
725/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
726/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
727/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
728 RXES(RBUF_BLOCK_LIST_READ_UNC)),
729/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
730 RXES(RBUF_BLOCK_LIST_READ_COR)),
731/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
732 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
733/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
734 RXES(RBUF_CSR_QENT_CNT_PARITY)),
735/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
736 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
737/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
738 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
739/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
740/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
741/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
742 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
743/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
744/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
745/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
746/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
747/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
748/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
749/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
750/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
751 RXES(RBUF_FL_INITDONE_PARITY)),
752/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
753 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
754/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
755/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
756/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
757/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
758 RXES(LOOKUP_DES_PART1_UNC_COR)),
759/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
760 RXES(LOOKUP_DES_PART2_PARITY)),
761/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
762/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
763/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
764/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
765/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
766/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
767/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
768/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
769/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
770/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
771/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
772/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
773/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
774/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
775/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
776/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
777/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
778/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
779/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
780/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
781/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
782/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
783};
784
785/* RXE errors that will trigger an SPC freeze */
786#define ALL_RXE_FREEZE_ERR \
787 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
831
832#define RXE_FREEZE_ABORT_MASK \
833 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
835 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
836
837/*
838 * DCC Error Flags
839 */
840#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
841static struct flag_table dcc_err_flags[] = {
842 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
843 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
844 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
845 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
847 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
848 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
849 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
850 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
851 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
852 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
853 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
854 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
855 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
856 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
857 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
858 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
859 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
860 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
861 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
862 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
863 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
864 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
865 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
866 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
867 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
868 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
869 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
870 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
871 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
872 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
873 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
875 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
876 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
877 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
878 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
879 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
880 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
881 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
882 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
883 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
884 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
886 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
887 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
888};
889
890/*
891 * LCB error flags
892 */
893#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
894static struct flag_table lcb_err_flags[] = {
895/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
896/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
897/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
898/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
899 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
900/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
901/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
902/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
903/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
904/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
905/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
906/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
907/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
908/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
909/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
910 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
911/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
912/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
913/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
914/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
915/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
916/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
917 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
918/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
919/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
920/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
921/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
922/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
923/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
924/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
925 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
926/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
927/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
928 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
929/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
930 LCBE(REDUNDANT_FLIT_PARITY_ERR))
931};
932
933/*
934 * DC8051 Error Flags
935 */
936#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
937static struct flag_table dc8051_err_flags[] = {
938 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
939 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
940 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
941 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
942 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
943 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
944 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
945 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
946 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
947 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
948 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
949};
950
951/*
952 * DC8051 Information Error flags
953 *
954 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
955 */
956static struct flag_table dc8051_info_err_flags[] = {
957 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
958 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
959 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
960 FLAG_ENTRY0("Serdes internal loopback failure",
961 FAILED_SERDES_INTERNAL_LOOPBACK),
962 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
963 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
964 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
965 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
966 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
968 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
969 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1034static void handle_temp_err(struct hfi1_devdata *);
1035static void dc_shutdown(struct hfi1_devdata *);
1036static void dc_start(struct hfi1_devdata *);
1037
1038/*
1039 * Error interrupt table entry. This is used as input to the interrupt
1040 * "clear down" routine used for all second tier error interrupt register.
1041 * Second tier interrupt registers have a single bit representing them
1042 * in the top-level CceIntStatus.
1043 */
1044struct err_reg_info {
1045 u32 status; /* status CSR offset */
1046 u32 clear; /* clear CSR offset */
1047 u32 mask; /* mask CSR offset */
1048 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1049 const char *desc;
1050};
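/*
 * Sketch of the "clear down" pattern an err_reg_info entry drives
 * (simplified; the real routine also decodes and logs the flags):
 *
 *	u64 reg = read_csr(dd, eri->status);
 *
 *	write_csr(dd, eri->clear, reg);		// ack what was seen
 *	if (reg && eri->handler)
 *		eri->handler(dd, source, reg);	// per-block reporting
 */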
1051
1052#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1053#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1054#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1055
1056/*
1057 * Helpers for building HFI and DC error interrupt table entries. Different
1058 * helpers are needed because of inconsistent register names.
1059 */
1060#define EE(reg, handler, desc) \
1061 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1062 handler, desc }
1063#define DC_EE1(reg, handler, desc) \
1064 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1065#define DC_EE2(reg, handler, desc) \
1066 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1067
1068/*
1069 * Table of the "misc" grouping of error interrupts. Each entry refers to
1070 * another register containing more information.
1071 */
1072static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1073/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1074/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1075/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1076/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1077/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1078/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1079/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1080/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1081 /* the rest are reserved */
1082};
1083
1084/*
1085 * Index into the Various section of the interrupt sources
1086 * corresponding to the Critical Temperature interrupt.
1087 */
1088#define TCRIT_INT_SOURCE 4
1089
1090/*
1091 * SDMA error interrupt entry - refers to another register containing more
1092 * information.
1093 */
1094static const struct err_reg_info sdma_eng_err =
1095 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1096
1097static const struct err_reg_info various_err[NUM_VARIOUS] = {
1098/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1099/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1100/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1101/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1102/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1103 /* rest are reserved */
1104};
1105
1106/*
1107 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1108 * register can not be derived from the MTU value because 10K is not
1109 * a power of 2. Therefore, we need a constant. Everything else can
1110 * be calculated.
1111 */
1112#define DCC_CFG_PORT_MTU_CAP_10240 7
1113
1114/*
1115 * Table of the DC grouping of error interrupts. Each entry refers to
1116 * another register containing more information.
1117 */
1118static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1119/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1120/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1121/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1122/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1123 /* the rest are reserved */
1124};
1125
1126struct cntr_entry {
1127 /*
1128 * counter name
1129 */
1130 char *name;
1131
1132 /*
1133 * csr to read for name (if applicable)
1134 */
1135 u64 csr;
1136
1137 /*
1138 * offset into dd or ppd to store the counter's value
1139 */
1140 int offset;
1141
1142 /*
1143 * flags
1144 */
1145 u8 flags;
1146
1147 /*
1148 * accessor for stat element, context either dd or ppd
1149 */
1150 u64 (*rw_cntr)(const struct cntr_entry *,
1151 void *context,
1152 int vl,
1153 int mode,
1154 u64 data);
1155};
1156
1157#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1158#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1159
1160#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1161{ \
1162 name, \
1163 csr, \
1164 offset, \
1165 flags, \
1166 accessor \
1167}
1168
1169/* 32bit RXE */
1170#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1171CNTR_ELEM(#name, \
1172 (counter * 8 + RCV_COUNTER_ARRAY32), \
1173 0, flags | CNTR_32BIT, \
1174 port_access_u32_csr)
1175
1176#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY32), \
1179 0, flags | CNTR_32BIT, \
1180 dev_access_u32_csr)
1181
1182/* 64bit RXE */
1183#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1184CNTR_ELEM(#name, \
1185 (counter * 8 + RCV_COUNTER_ARRAY64), \
1186 0, flags, \
1187 port_access_u64_csr)
1188
1189#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1190CNTR_ELEM(#name, \
1191 (counter * 8 + RCV_COUNTER_ARRAY64), \
1192 0, flags, \
1193 dev_access_u64_csr)
1194
1195#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1196#define OVR_ELM(ctx) \
1197CNTR_ELEM("RcvHdrOvr" #ctx, \
1198 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1199 0, CNTR_NORMAL, port_access_u64_csr)
1200
1201/* 32bit TXE */
1202#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY32), \
1205 0, flags | CNTR_32BIT, \
1206 port_access_u32_csr)
1207
1208/* 64bit TXE */
1209#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1210CNTR_ELEM(#name, \
1211 (counter * 8 + SEND_COUNTER_ARRAY64), \
1212 0, flags, \
1213 port_access_u64_csr)
1214
1215# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1216CNTR_ELEM(#name,\
1217 counter * 8 + SEND_COUNTER_ARRAY64, \
1218 0, \
1219 flags, \
1220 dev_access_u64_csr)
1221
1222/* CCE */
1223#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225 (counter * 8 + CCE_COUNTER_ARRAY32), \
1226 0, flags | CNTR_32BIT, \
1227 dev_access_u32_csr)
1228
1229#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1232 0, flags | CNTR_32BIT, \
1233 dev_access_u32_csr)
1234
1235/* DC */
1236#define DC_PERF_CNTR(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dev_access_u64_csr)
1242
1243#define DC_PERF_CNTR_LCB(name, counter, flags) \
1244CNTR_ELEM(#name, \
1245 counter, \
1246 0, \
1247 flags, \
1248 dc_access_lcb_cntr)
1249
1250/* ibp counters */
1251#define SW_IBP_CNTR(name, cntr) \
1252CNTR_ELEM(#name, \
1253 0, \
1254 0, \
1255 CNTR_SYNTH, \
1256 access_ibp_##cntr)
1257
1258u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1259{
1260 u64 val;
1261
1262 if (dd->flags & HFI1_PRESENT) {
1263 val = readq((void __iomem *)dd->kregbase + offset);
1264 return val;
1265 }
1266 return -1;
1267}
1268
1269void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1270{
1271 if (dd->flags & HFI1_PRESENT)
1272 writeq(value, (void __iomem *)dd->kregbase + offset);
1273}
1274
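/*
 * Typical read-modify-write using the two helpers above (illustrative
 * only; SOME_CSR and its ENABLE field are placeholder names, not
 * registers defined by this driver):
 *
 *	u64 reg = read_csr(dd, SOME_CSR);
 *
 *	reg |= SOME_CSR_ENABLE_SMASK;
 *	write_csr(dd, SOME_CSR, reg);
 */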
1275void __iomem *get_csr_addr(
1276 struct hfi1_devdata *dd,
1277 u32 offset)
1278{
1279 return (void __iomem *)dd->kregbase + offset;
1280}
1281
1282static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1283 int mode, u64 value)
1284{
1285 u64 ret;
1286
1287 if (mode == CNTR_MODE_R) {
1288 ret = read_csr(dd, csr);
1289 } else if (mode == CNTR_MODE_W) {
1290 write_csr(dd, csr, value);
1291 ret = value;
1292 } else {
1293 dd_dev_err(dd, "Invalid cntr register access mode");
1294 return 0;
1295 }
1296
1297 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1298 return ret;
1299}
1300
1301/* Dev Access */
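/*
 * Device-level accessors.  Counters flagged CNTR_SDMA are replicated per
 * SDMA engine at a 0x100-byte stride and counters flagged CNTR_VL per VL at
 * an 8-byte stride; all other counters only accept vl == CNTR_INVALID_VL.
 */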
1302static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1303 void *context, int vl, int mode, u64 data)
1304{
1305	struct hfi1_devdata *dd = context;
1306	u64 csr = entry->csr;
1307
1308	if (entry->flags & CNTR_SDMA) {
1309 if (vl == CNTR_INVALID_VL)
1310 return 0;
1311 csr += 0x100 * vl;
1312 } else {
1313 if (vl != CNTR_INVALID_VL)
1314 return 0;
1315 }
1316 return read_write_csr(dd, csr, mode, data);
1317}
1318
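/*
 * Per-SDMA-engine software counters: 'idx' is the engine number rather than
 * a VL, and the per_sdma/num_sdma checks guard against reads taken before
 * the engines have been allocated.
 */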
1319static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1320 void *context, int idx, int mode, u64 data)
1321{
1322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1323
1324 if (dd->per_sdma && idx < dd->num_sdma)
1325 return dd->per_sdma[idx].err_cnt;
1326 return 0;
1327}
1328
1329static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1330 void *context, int idx, int mode, u64 data)
1331{
1332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1333
1334 if (dd->per_sdma && idx < dd->num_sdma)
1335 return dd->per_sdma[idx].sdma_int_cnt;
1336 return 0;
1337}
1338
1339static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1340 void *context, int idx, int mode, u64 data)
1341{
1342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1343
1344 if (dd->per_sdma && idx < dd->num_sdma)
1345 return dd->per_sdma[idx].idle_int_cnt;
1346 return 0;
1347}
1348
1349static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1350 void *context, int idx, int mode,
1351 u64 data)
1352{
1353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1354
1355 if (dd->per_sdma && idx < dd->num_sdma)
1356 return dd->per_sdma[idx].progress_int_cnt;
1357 return 0;
1358}
1359
1360static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1361 int vl, int mode, u64 data)
1362{
1363	struct hfi1_devdata *dd = context;
1364
1365 u64 val = 0;
1366 u64 csr = entry->csr;
1367
1368 if (entry->flags & CNTR_VL) {
1369 if (vl == CNTR_INVALID_VL)
1370 return 0;
1371 csr += 8 * vl;
1372 } else {
1373 if (vl != CNTR_INVALID_VL)
1374 return 0;
1375 }
1376
1377 val = read_write_csr(dd, csr, mode, data);
1378 return val;
1379}
1380
1381static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1382 int vl, int mode, u64 data)
1383{
1384	struct hfi1_devdata *dd = context;
1385	u32 csr = entry->csr;
1386 int ret = 0;
1387
1388 if (vl != CNTR_INVALID_VL)
1389 return 0;
1390 if (mode == CNTR_MODE_R)
1391 ret = read_lcb_csr(dd, csr, &data);
1392 else if (mode == CNTR_MODE_W)
1393 ret = write_lcb_csr(dd, csr, data);
1394
1395 if (ret) {
1396 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1397 return 0;
1398 }
1399
1400 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1401 return data;
1402}
1403
1404/* Port Access */
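/*
 * Port-level accessors follow the same conventions as the device accessors
 * above, but the context is a hfi1_pportdata and CSRs are reached through
 * ppd->dd.
 */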
1405static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1406 int vl, int mode, u64 data)
1407{
1408	struct hfi1_pportdata *ppd = context;
1409
1410 if (vl != CNTR_INVALID_VL)
1411 return 0;
1412 return read_write_csr(ppd->dd, entry->csr, mode, data);
1413}
1414
1415static u64 port_access_u64_csr(const struct cntr_entry *entry,
1416 void *context, int vl, int mode, u64 data)
1417{
1418	struct hfi1_pportdata *ppd = context;
1419	u64 val;
1420 u64 csr = entry->csr;
1421
1422 if (entry->flags & CNTR_VL) {
1423 if (vl == CNTR_INVALID_VL)
1424 return 0;
1425 csr += 8 * vl;
1426 } else {
1427 if (vl != CNTR_INVALID_VL)
1428 return 0;
1429 }
1430 val = read_write_csr(ppd->dd, csr, mode, data);
1431 return val;
1432}
1433
1434/* Software defined */
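/*
 * Pure software counters: read_write_sw() reads or overwrites a u64 kept in
 * driver data structures rather than a hardware CSR.
 */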
1435static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1436 u64 data)
1437{
1438 u64 ret;
1439
1440 if (mode == CNTR_MODE_R) {
1441 ret = *cntr;
1442 } else if (mode == CNTR_MODE_W) {
1443 *cntr = data;
1444 ret = data;
1445 } else {
1446 dd_dev_err(dd, "Invalid cntr sw access mode");
1447 return 0;
1448 }
1449
1450 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1451
1452 return ret;
1453}
1454
1455static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1456 int vl, int mode, u64 data)
1457{
1458	struct hfi1_pportdata *ppd = context;
1459
1460 if (vl != CNTR_INVALID_VL)
1461 return 0;
1462 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1463}
1464
1465static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1466 int vl, int mode, u64 data)
1467{
1468	struct hfi1_pportdata *ppd = context;
1469
1470 if (vl != CNTR_INVALID_VL)
1471 return 0;
1472 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1473}
1474
1475static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1476 void *context, int vl, int mode,
1477 u64 data)
1478{
1479 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1480
1481 if (vl != CNTR_INVALID_VL)
1482 return 0;
1483 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1484}
1485
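/*
 * Transmit discards are kept both as a port aggregate and per VL:
 * CNTR_INVALID_VL selects the aggregate, an in-range vl selects the per-VL
 * counter, and any other vl reads back as zero.
 */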
1486static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1487 void *context, int vl, int mode, u64 data)
1488{
1489	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1490 u64 zero = 0;
1491 u64 *counter;
1492
1493	if (vl == CNTR_INVALID_VL)
1494 counter = &ppd->port_xmit_discards;
1495 else if (vl >= 0 && vl < C_VL_COUNT)
1496 counter = &ppd->port_xmit_discards_vl[vl];
1497 else
1498 counter = &zero;
1499
1500	return read_write_sw(ppd->dd, counter, mode, data);
1501}
1502
1503static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1504 void *context, int vl, int mode, u64 data)
1505{
1506	struct hfi1_pportdata *ppd = context;
1507
1508 if (vl != CNTR_INVALID_VL)
1509 return 0;
1510
1511 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1512 mode, data);
1513}
1514
1515static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1516 void *context, int vl, int mode, u64 data)
1517{
1518	struct hfi1_pportdata *ppd = context;
1519
1520 if (vl != CNTR_INVALID_VL)
1521 return 0;
1522
1523 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1524 mode, data);
1525}
1526
1527u64 get_all_cpu_total(u64 __percpu *cntr)
1528{
1529 int cpu;
1530 u64 counter = 0;
1531
1532 for_each_possible_cpu(cpu)
1533 counter += *per_cpu_ptr(cntr, cpu);
1534 return counter;
1535}
1536
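/*
 * Per-CPU counter helper: the reported value is the sum over all possible
 * CPUs (get_all_cpu_total()) minus the zero baseline *z_val.  Writing 0
 * "clears" the counter by moving the baseline to the current total; any
 * other write is rejected.  A sketch of the observable behaviour, with
 * made-up numbers:
 *
 *	total = 100, *z_val = 0    ->  read returns 100
 *	write 0                    ->  *z_val becomes 100
 *	total grows to 130         ->  read returns 30
 */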
1537static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1538 u64 __percpu *cntr,
1539 int vl, int mode, u64 data)
1540{
1541	u64 ret = 0;
1542
1543 if (vl != CNTR_INVALID_VL)
1544 return 0;
1545
1546 if (mode == CNTR_MODE_R) {
1547 ret = get_all_cpu_total(cntr) - *z_val;
1548 } else if (mode == CNTR_MODE_W) {
1549 /* A write can only zero the counter */
1550 if (data == 0)
1551 *z_val = get_all_cpu_total(cntr);
1552 else
1553 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1554 } else {
1555 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1556 return 0;
1557 }
1558
1559 return ret;
1560}
1561
1562static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1563 void *context, int vl, int mode, u64 data)
1564{
1565	struct hfi1_devdata *dd = context;
1566
1567 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1568 mode, data);
1569}
1570
1571static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1572 void *context, int vl, int mode, u64 data)
1573{
1574	struct hfi1_devdata *dd = context;
1575
1576 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1577 mode, data);
1578}
1579
1580static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1581 void *context, int vl, int mode, u64 data)
1582{
1583	struct hfi1_devdata *dd = context;
1584
1585 return dd->verbs_dev.n_piowait;
1586}
1587
1588static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1589 void *context, int vl, int mode, u64 data)
1590{
1591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1592
1593 return dd->verbs_dev.n_piodrain;
1594}
1595
1596static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1597 void *context, int vl, int mode, u64 data)
1598{
1599	struct hfi1_devdata *dd = context;
1600
1601 return dd->verbs_dev.n_txwait;
1602}
1603
1604static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1605 void *context, int vl, int mode, u64 data)
1606{
1607	struct hfi1_devdata *dd = context;
1608
1609 return dd->verbs_dev.n_kmem_wait;
1610}
1611
1612static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1613 void *context, int vl, int mode, u64 data)
1614{
1615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1616
1617	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1618 mode, data);
1619}
1620
1621/* Software counters for the error status bits within MISC_ERR_STATUS */
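/*
 * Each accessor below reports one element of misc_err_status_cnt[], with
 * array index N corresponding to bit N of MISC_ERR_STATUS; the array itself
 * is assumed to be incremented elsewhere by the error-interrupt handling
 * code.
 */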
1622static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1623 void *context, int vl, int mode,
1624 u64 data)
1625{
1626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1627
1628 return dd->misc_err_status_cnt[12];
1629}
1630
1631static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1632 void *context, int vl, int mode,
1633 u64 data)
1634{
1635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1636
1637 return dd->misc_err_status_cnt[11];
1638}
1639
1640static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1641 void *context, int vl, int mode,
1642 u64 data)
1643{
1644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1645
1646 return dd->misc_err_status_cnt[10];
1647}
1648
1649static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1650 void *context, int vl,
1651 int mode, u64 data)
1652{
1653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1654
1655 return dd->misc_err_status_cnt[9];
1656}
1657
1658static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1659 void *context, int vl, int mode,
1660 u64 data)
1661{
1662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1663
1664 return dd->misc_err_status_cnt[8];
1665}
1666
1667static u64 access_misc_efuse_read_bad_addr_err_cnt(
1668 const struct cntr_entry *entry,
1669 void *context, int vl, int mode, u64 data)
1670{
1671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1672
1673 return dd->misc_err_status_cnt[7];
1674}
1675
1676static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1677 void *context, int vl,
1678 int mode, u64 data)
1679{
1680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1681
1682 return dd->misc_err_status_cnt[6];
1683}
1684
1685static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1686 void *context, int vl, int mode,
1687 u64 data)
1688{
1689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1690
1691 return dd->misc_err_status_cnt[5];
1692}
1693
1694static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1695 void *context, int vl, int mode,
1696 u64 data)
1697{
1698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1699
1700 return dd->misc_err_status_cnt[4];
1701}
1702
1703static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1704 void *context, int vl,
1705 int mode, u64 data)
1706{
1707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709 return dd->misc_err_status_cnt[3];
1710}
1711
1712static u64 access_misc_csr_write_bad_addr_err_cnt(
1713 const struct cntr_entry *entry,
1714 void *context, int vl, int mode, u64 data)
1715{
1716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1717
1718 return dd->misc_err_status_cnt[2];
1719}
1720
1721static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1722 void *context, int vl,
1723 int mode, u64 data)
1724{
1725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1726
1727 return dd->misc_err_status_cnt[1];
1728}
1729
1730static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1731 void *context, int vl, int mode,
1732 u64 data)
1733{
1734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1735
1736 return dd->misc_err_status_cnt[0];
1737}
1738
1739/*
1740 * Software counter for the aggregate of
1741 * individual CceErrStatus counters
1742 */
1743static u64 access_sw_cce_err_status_aggregated_cnt(
1744 const struct cntr_entry *entry,
1745 void *context, int vl, int mode, u64 data)
1746{
1747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1748
1749 return dd->sw_cce_err_status_aggregate;
1750}
1751
1752/*
1753 * Software counters corresponding to each of the
1754 * error status bits within CceErrStatus
1755 */
1756static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1757 void *context, int vl, int mode,
1758 u64 data)
1759{
1760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1761
1762 return dd->cce_err_status_cnt[40];
1763}
1764
1765static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1766 void *context, int vl, int mode,
1767 u64 data)
1768{
1769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1770
1771 return dd->cce_err_status_cnt[39];
1772}
1773
1774static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1775 void *context, int vl, int mode,
1776 u64 data)
1777{
1778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1779
1780 return dd->cce_err_status_cnt[38];
1781}
1782
1783static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1784 void *context, int vl, int mode,
1785 u64 data)
1786{
1787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1788
1789 return dd->cce_err_status_cnt[37];
1790}
1791
1792static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1793 void *context, int vl, int mode,
1794 u64 data)
1795{
1796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1797
1798 return dd->cce_err_status_cnt[36];
1799}
1800
1801static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1802 const struct cntr_entry *entry,
1803 void *context, int vl, int mode, u64 data)
1804{
1805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1806
1807 return dd->cce_err_status_cnt[35];
1808}
1809
1810static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1811 const struct cntr_entry *entry,
1812 void *context, int vl, int mode, u64 data)
1813{
1814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1815
1816 return dd->cce_err_status_cnt[34];
1817}
1818
1819static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1820 void *context, int vl,
1821 int mode, u64 data)
1822{
1823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1824
1825 return dd->cce_err_status_cnt[33];
1826}
1827
1828static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1829 void *context, int vl, int mode,
1830 u64 data)
1831{
1832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1833
1834 return dd->cce_err_status_cnt[32];
1835}
1836
1837static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1838 void *context, int vl, int mode, u64 data)
1839{
1840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1841
1842 return dd->cce_err_status_cnt[31];
1843}
1844
1845static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1846 void *context, int vl, int mode,
1847 u64 data)
1848{
1849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1850
1851 return dd->cce_err_status_cnt[30];
1852}
1853
1854static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1855 void *context, int vl, int mode,
1856 u64 data)
1857{
1858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1859
1860 return dd->cce_err_status_cnt[29];
1861}
1862
1863static u64 access_pcic_transmit_back_parity_err_cnt(
1864 const struct cntr_entry *entry,
1865 void *context, int vl, int mode, u64 data)
1866{
1867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1868
1869 return dd->cce_err_status_cnt[28];
1870}
1871
1872static u64 access_pcic_transmit_front_parity_err_cnt(
1873 const struct cntr_entry *entry,
1874 void *context, int vl, int mode, u64 data)
1875{
1876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1877
1878 return dd->cce_err_status_cnt[27];
1879}
1880
1881static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1882 void *context, int vl, int mode,
1883 u64 data)
1884{
1885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1886
1887 return dd->cce_err_status_cnt[26];
1888}
1889
1890static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1891 void *context, int vl, int mode,
1892 u64 data)
1893{
1894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1895
1896 return dd->cce_err_status_cnt[25];
1897}
1898
1899static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1900 void *context, int vl, int mode,
1901 u64 data)
1902{
1903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1904
1905 return dd->cce_err_status_cnt[24];
1906}
1907
1908static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1909 void *context, int vl, int mode,
1910 u64 data)
1911{
1912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1913
1914 return dd->cce_err_status_cnt[23];
1915}
1916
1917static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1918 void *context, int vl,
1919 int mode, u64 data)
1920{
1921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1922
1923 return dd->cce_err_status_cnt[22];
1924}
1925
1926static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1927 void *context, int vl, int mode,
1928 u64 data)
1929{
1930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1931
1932 return dd->cce_err_status_cnt[21];
1933}
1934
1935static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1936 const struct cntr_entry *entry,
1937 void *context, int vl, int mode, u64 data)
1938{
1939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1940
1941 return dd->cce_err_status_cnt[20];
1942}
1943
1944static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1945 void *context, int vl,
1946 int mode, u64 data)
1947{
1948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1949
1950 return dd->cce_err_status_cnt[19];
1951}
1952
1953static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1954 void *context, int vl, int mode,
1955 u64 data)
1956{
1957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1958
1959 return dd->cce_err_status_cnt[18];
1960}
1961
1962static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1963 void *context, int vl, int mode,
1964 u64 data)
1965{
1966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1967
1968 return dd->cce_err_status_cnt[17];
1969}
1970
1971static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1972 void *context, int vl, int mode,
1973 u64 data)
1974{
1975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1976
1977 return dd->cce_err_status_cnt[16];
1978}
1979
1980static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1981 void *context, int vl, int mode,
1982 u64 data)
1983{
1984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1985
1986 return dd->cce_err_status_cnt[15];
1987}
1988
1989static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1990 void *context, int vl,
1991 int mode, u64 data)
1992{
1993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1994
1995 return dd->cce_err_status_cnt[14];
1996}
1997
1998static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1999 void *context, int vl, int mode,
2000 u64 data)
2001{
2002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2003
2004 return dd->cce_err_status_cnt[13];
2005}
2006
2007static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2008 const struct cntr_entry *entry,
2009 void *context, int vl, int mode, u64 data)
2010{
2011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2012
2013 return dd->cce_err_status_cnt[12];
2014}
2015
2016static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2017 const struct cntr_entry *entry,
2018 void *context, int vl, int mode, u64 data)
2019{
2020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2021
2022 return dd->cce_err_status_cnt[11];
2023}
2024
2025static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2026 const struct cntr_entry *entry,
2027 void *context, int vl, int mode, u64 data)
2028{
2029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2030
2031 return dd->cce_err_status_cnt[10];
2032}
2033
2034static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2035 const struct cntr_entry *entry,
2036 void *context, int vl, int mode, u64 data)
2037{
2038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2039
2040 return dd->cce_err_status_cnt[9];
2041}
2042
2043static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2044 const struct cntr_entry *entry,
2045 void *context, int vl, int mode, u64 data)
2046{
2047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2048
2049 return dd->cce_err_status_cnt[8];
2050}
2051
2052static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2053 void *context, int vl,
2054 int mode, u64 data)
2055{
2056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2057
2058 return dd->cce_err_status_cnt[7];
2059}
2060
2061static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2062 const struct cntr_entry *entry,
2063 void *context, int vl, int mode, u64 data)
2064{
2065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2066
2067 return dd->cce_err_status_cnt[6];
2068}
2069
2070static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2071 void *context, int vl, int mode,
2072 u64 data)
2073{
2074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2075
2076 return dd->cce_err_status_cnt[5];
2077}
2078
2079static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2080 void *context, int vl, int mode,
2081 u64 data)
2082{
2083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2084
2085 return dd->cce_err_status_cnt[4];
2086}
2087
2088static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2089 const struct cntr_entry *entry,
2090 void *context, int vl, int mode, u64 data)
2091{
2092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2093
2094 return dd->cce_err_status_cnt[3];
2095}
2096
2097static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2098 void *context, int vl,
2099 int mode, u64 data)
2100{
2101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2102
2103 return dd->cce_err_status_cnt[2];
2104}
2105
2106static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2107 void *context, int vl,
2108 int mode, u64 data)
2109{
2110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2111
2112 return dd->cce_err_status_cnt[1];
2113}
2114
2115static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2116 void *context, int vl, int mode,
2117 u64 data)
2118{
2119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2120
2121 return dd->cce_err_status_cnt[0];
2122}
2123
2124/*
2125 * Software counters corresponding to each of the
2126 * error status bits within RcvErrStatus
2127 */
2128static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2129 void *context, int vl, int mode,
2130 u64 data)
2131{
2132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2133
2134 return dd->rcv_err_status_cnt[63];
2135}
2136
2137static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2138 void *context, int vl,
2139 int mode, u64 data)
2140{
2141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2142
2143 return dd->rcv_err_status_cnt[62];
2144}
2145
2146static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2147 void *context, int vl, int mode,
2148 u64 data)
2149{
2150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2151
2152 return dd->rcv_err_status_cnt[61];
2153}
2154
2155static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2156 void *context, int vl, int mode,
2157 u64 data)
2158{
2159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2160
2161 return dd->rcv_err_status_cnt[60];
2162}
2163
2164static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2165 void *context, int vl,
2166 int mode, u64 data)
2167{
2168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2169
2170 return dd->rcv_err_status_cnt[59];
2171}
2172
2173static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2174 void *context, int vl,
2175 int mode, u64 data)
2176{
2177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2178
2179 return dd->rcv_err_status_cnt[58];
2180}
2181
2182static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2183 void *context, int vl, int mode,
2184 u64 data)
2185{
2186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2187
2188 return dd->rcv_err_status_cnt[57];
2189}
2190
2191static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2192 void *context, int vl, int mode,
2193 u64 data)
2194{
2195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2196
2197 return dd->rcv_err_status_cnt[56];
2198}
2199
2200static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2201 void *context, int vl, int mode,
2202 u64 data)
2203{
2204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2205
2206 return dd->rcv_err_status_cnt[55];
2207}
2208
2209static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2210 const struct cntr_entry *entry,
2211 void *context, int vl, int mode, u64 data)
2212{
2213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2214
2215 return dd->rcv_err_status_cnt[54];
2216}
2217
2218static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2219 const struct cntr_entry *entry,
2220 void *context, int vl, int mode, u64 data)
2221{
2222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2223
2224 return dd->rcv_err_status_cnt[53];
2225}
2226
2227static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2228 void *context, int vl,
2229 int mode, u64 data)
2230{
2231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2232
2233 return dd->rcv_err_status_cnt[52];
2234}
2235
2236static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2237 void *context, int vl,
2238 int mode, u64 data)
2239{
2240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2241
2242 return dd->rcv_err_status_cnt[51];
2243}
2244
2245static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2246 void *context, int vl,
2247 int mode, u64 data)
2248{
2249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2250
2251 return dd->rcv_err_status_cnt[50];
2252}
2253
2254static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2255 void *context, int vl,
2256 int mode, u64 data)
2257{
2258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2259
2260 return dd->rcv_err_status_cnt[49];
2261}
2262
2263static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2264 void *context, int vl,
2265 int mode, u64 data)
2266{
2267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2268
2269 return dd->rcv_err_status_cnt[48];
2270}
2271
2272static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2273 void *context, int vl,
2274 int mode, u64 data)
2275{
2276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2277
2278 return dd->rcv_err_status_cnt[47];
2279}
2280
2281static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2282 void *context, int vl, int mode,
2283 u64 data)
2284{
2285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2286
2287 return dd->rcv_err_status_cnt[46];
2288}
2289
2290static u64 access_rx_hq_intr_csr_parity_err_cnt(
2291 const struct cntr_entry *entry,
2292 void *context, int vl, int mode, u64 data)
2293{
2294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2295
2296 return dd->rcv_err_status_cnt[45];
2297}
2298
2299static u64 access_rx_lookup_csr_parity_err_cnt(
2300 const struct cntr_entry *entry,
2301 void *context, int vl, int mode, u64 data)
2302{
2303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2304
2305 return dd->rcv_err_status_cnt[44];
2306}
2307
2308static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2309 const struct cntr_entry *entry,
2310 void *context, int vl, int mode, u64 data)
2311{
2312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2313
2314 return dd->rcv_err_status_cnt[43];
2315}
2316
2317static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2318 const struct cntr_entry *entry,
2319 void *context, int vl, int mode, u64 data)
2320{
2321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2322
2323 return dd->rcv_err_status_cnt[42];
2324}
2325
2326static u64 access_rx_lookup_des_part2_parity_err_cnt(
2327 const struct cntr_entry *entry,
2328 void *context, int vl, int mode, u64 data)
2329{
2330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2331
2332 return dd->rcv_err_status_cnt[41];
2333}
2334
2335static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2336 const struct cntr_entry *entry,
2337 void *context, int vl, int mode, u64 data)
2338{
2339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2340
2341 return dd->rcv_err_status_cnt[40];
2342}
2343
2344static u64 access_rx_lookup_des_part1_unc_err_cnt(
2345 const struct cntr_entry *entry,
2346 void *context, int vl, int mode, u64 data)
2347{
2348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2349
2350 return dd->rcv_err_status_cnt[39];
2351}
2352
2353static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2354 const struct cntr_entry *entry,
2355 void *context, int vl, int mode, u64 data)
2356{
2357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2358
2359 return dd->rcv_err_status_cnt[38];
2360}
2361
2362static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2363 const struct cntr_entry *entry,
2364 void *context, int vl, int mode, u64 data)
2365{
2366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2367
2368 return dd->rcv_err_status_cnt[37];
2369}
2370
2371static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2372 const struct cntr_entry *entry,
2373 void *context, int vl, int mode, u64 data)
2374{
2375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2376
2377 return dd->rcv_err_status_cnt[36];
2378}
2379
2380static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2381 const struct cntr_entry *entry,
2382 void *context, int vl, int mode, u64 data)
2383{
2384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2385
2386 return dd->rcv_err_status_cnt[35];
2387}
2388
2389static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2390 const struct cntr_entry *entry,
2391 void *context, int vl, int mode, u64 data)
2392{
2393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2394
2395 return dd->rcv_err_status_cnt[34];
2396}
2397
2398static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2399 const struct cntr_entry *entry,
2400 void *context, int vl, int mode, u64 data)
2401{
2402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2403
2404 return dd->rcv_err_status_cnt[33];
2405}
2406
2407static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2408 void *context, int vl, int mode,
2409 u64 data)
2410{
2411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2412
2413 return dd->rcv_err_status_cnt[32];
2414}
2415
2416static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2417 void *context, int vl, int mode,
2418 u64 data)
2419{
2420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2421
2422 return dd->rcv_err_status_cnt[31];
2423}
2424
2425static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2426 void *context, int vl, int mode,
2427 u64 data)
2428{
2429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2430
2431 return dd->rcv_err_status_cnt[30];
2432}
2433
2434static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2435 void *context, int vl, int mode,
2436 u64 data)
2437{
2438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2439
2440 return dd->rcv_err_status_cnt[29];
2441}
2442
2443static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2444 void *context, int vl,
2445 int mode, u64 data)
2446{
2447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2448
2449 return dd->rcv_err_status_cnt[28];
2450}
2451
2452static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2453 const struct cntr_entry *entry,
2454 void *context, int vl, int mode, u64 data)
2455{
2456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2457
2458 return dd->rcv_err_status_cnt[27];
2459}
2460
2461static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2462 const struct cntr_entry *entry,
2463 void *context, int vl, int mode, u64 data)
2464{
2465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2466
2467 return dd->rcv_err_status_cnt[26];
2468}
2469
2470static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2471 const struct cntr_entry *entry,
2472 void *context, int vl, int mode, u64 data)
2473{
2474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2475
2476 return dd->rcv_err_status_cnt[25];
2477}
2478
2479static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2480 const struct cntr_entry *entry,
2481 void *context, int vl, int mode, u64 data)
2482{
2483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2484
2485 return dd->rcv_err_status_cnt[24];
2486}
2487
2488static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2489 const struct cntr_entry *entry,
2490 void *context, int vl, int mode, u64 data)
2491{
2492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2493
2494 return dd->rcv_err_status_cnt[23];
2495}
2496
2497static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2498 const struct cntr_entry *entry,
2499 void *context, int vl, int mode, u64 data)
2500{
2501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2502
2503 return dd->rcv_err_status_cnt[22];
2504}
2505
2506static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2507 const struct cntr_entry *entry,
2508 void *context, int vl, int mode, u64 data)
2509{
2510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2511
2512 return dd->rcv_err_status_cnt[21];
2513}
2514
2515static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2516 const struct cntr_entry *entry,
2517 void *context, int vl, int mode, u64 data)
2518{
2519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2520
2521 return dd->rcv_err_status_cnt[20];
2522}
2523
2524static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2525 const struct cntr_entry *entry,
2526 void *context, int vl, int mode, u64 data)
2527{
2528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2529
2530 return dd->rcv_err_status_cnt[19];
2531}
2532
2533static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2534 void *context, int vl,
2535 int mode, u64 data)
2536{
2537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2538
2539 return dd->rcv_err_status_cnt[18];
2540}
2541
2542static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2543 void *context, int vl,
2544 int mode, u64 data)
2545{
2546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2547
2548 return dd->rcv_err_status_cnt[17];
2549}
2550
2551static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2552 const struct cntr_entry *entry,
2553 void *context, int vl, int mode, u64 data)
2554{
2555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2556
2557 return dd->rcv_err_status_cnt[16];
2558}
2559
2560static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2561 const struct cntr_entry *entry,
2562 void *context, int vl, int mode, u64 data)
2563{
2564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2565
2566 return dd->rcv_err_status_cnt[15];
2567}
2568
2569static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2570 void *context, int vl,
2571 int mode, u64 data)
2572{
2573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2574
2575 return dd->rcv_err_status_cnt[14];
2576}
2577
2578static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2579 void *context, int vl,
2580 int mode, u64 data)
2581{
2582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2583
2584 return dd->rcv_err_status_cnt[13];
2585}
2586
2587static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2588 void *context, int vl, int mode,
2589 u64 data)
2590{
2591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2592
2593 return dd->rcv_err_status_cnt[12];
2594}
2595
2596static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2597 void *context, int vl, int mode,
2598 u64 data)
2599{
2600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2601
2602 return dd->rcv_err_status_cnt[11];
2603}
2604
2605static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2606 void *context, int vl, int mode,
2607 u64 data)
2608{
2609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2610
2611 return dd->rcv_err_status_cnt[10];
2612}
2613
2614static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2615 void *context, int vl, int mode,
2616 u64 data)
2617{
2618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2619
2620 return dd->rcv_err_status_cnt[9];
2621}
2622
2623static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2624 void *context, int vl, int mode,
2625 u64 data)
2626{
2627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2628
2629 return dd->rcv_err_status_cnt[8];
2630}
2631
2632static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2633 const struct cntr_entry *entry,
2634 void *context, int vl, int mode, u64 data)
2635{
2636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2637
2638 return dd->rcv_err_status_cnt[7];
2639}
2640
2641static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2642 const struct cntr_entry *entry,
2643 void *context, int vl, int mode, u64 data)
2644{
2645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2646
2647 return dd->rcv_err_status_cnt[6];
2648}
2649
2650static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2651 void *context, int vl, int mode,
2652 u64 data)
2653{
2654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2655
2656 return dd->rcv_err_status_cnt[5];
2657}
2658
2659static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2660 void *context, int vl, int mode,
2661 u64 data)
2662{
2663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2664
2665 return dd->rcv_err_status_cnt[4];
2666}
2667
2668static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2669 void *context, int vl, int mode,
2670 u64 data)
2671{
2672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2673
2674 return dd->rcv_err_status_cnt[3];
2675}
2676
2677static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2678 void *context, int vl, int mode,
2679 u64 data)
2680{
2681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2682
2683 return dd->rcv_err_status_cnt[2];
2684}
2685
2686static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2687 void *context, int vl, int mode,
2688 u64 data)
2689{
2690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2691
2692 return dd->rcv_err_status_cnt[1];
2693}
2694
2695static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2696 void *context, int vl, int mode,
2697 u64 data)
2698{
2699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2700
2701 return dd->rcv_err_status_cnt[0];
2702}
2703
2704/*
2705 * Software counters corresponding to each of the
2706 * error status bits within SendPioErrStatus
2707 */
2708static u64 access_pio_pec_sop_head_parity_err_cnt(
2709 const struct cntr_entry *entry,
2710 void *context, int vl, int mode, u64 data)
2711{
2712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2713
2714 return dd->send_pio_err_status_cnt[35];
2715}
2716
2717static u64 access_pio_pcc_sop_head_parity_err_cnt(
2718 const struct cntr_entry *entry,
2719 void *context, int vl, int mode, u64 data)
2720{
2721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2722
2723 return dd->send_pio_err_status_cnt[34];
2724}
2725
2726static u64 access_pio_last_returned_cnt_parity_err_cnt(
2727 const struct cntr_entry *entry,
2728 void *context, int vl, int mode, u64 data)
2729{
2730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2731
2732 return dd->send_pio_err_status_cnt[33];
2733}
2734
2735static u64 access_pio_current_free_cnt_parity_err_cnt(
2736 const struct cntr_entry *entry,
2737 void *context, int vl, int mode, u64 data)
2738{
2739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2740
2741 return dd->send_pio_err_status_cnt[32];
2742}
2743
2744static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2745 void *context, int vl, int mode,
2746 u64 data)
2747{
2748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2749
2750 return dd->send_pio_err_status_cnt[31];
2751}
2752
2753static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2754 void *context, int vl, int mode,
2755 u64 data)
2756{
2757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2758
2759 return dd->send_pio_err_status_cnt[30];
2760}
2761
2762static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2763 void *context, int vl, int mode,
2764 u64 data)
2765{
2766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2767
2768 return dd->send_pio_err_status_cnt[29];
2769}
2770
2771static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2772 const struct cntr_entry *entry,
2773 void *context, int vl, int mode, u64 data)
2774{
2775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2776
2777 return dd->send_pio_err_status_cnt[28];
2778}
2779
2780static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2781 void *context, int vl, int mode,
2782 u64 data)
2783{
2784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2785
2786 return dd->send_pio_err_status_cnt[27];
2787}
2788
2789static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2790 void *context, int vl, int mode,
2791 u64 data)
2792{
2793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2794
2795 return dd->send_pio_err_status_cnt[26];
2796}
2797
2798static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2799 void *context, int vl,
2800 int mode, u64 data)
2801{
2802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2803
2804 return dd->send_pio_err_status_cnt[25];
2805}
2806
2807static u64 access_pio_block_qw_count_parity_err_cnt(
2808 const struct cntr_entry *entry,
2809 void *context, int vl, int mode, u64 data)
2810{
2811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2812
2813 return dd->send_pio_err_status_cnt[24];
2814}
2815
2816static u64 access_pio_write_qw_valid_parity_err_cnt(
2817 const struct cntr_entry *entry,
2818 void *context, int vl, int mode, u64 data)
2819{
2820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2821
2822 return dd->send_pio_err_status_cnt[23];
2823}
2824
2825static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2826 void *context, int vl, int mode,
2827 u64 data)
2828{
2829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2830
2831 return dd->send_pio_err_status_cnt[22];
2832}
2833
2834static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2835 void *context, int vl,
2836 int mode, u64 data)
2837{
2838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2839
2840 return dd->send_pio_err_status_cnt[21];
2841}
2842
2843static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2844 void *context, int vl,
2845 int mode, u64 data)
2846{
2847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2848
2849 return dd->send_pio_err_status_cnt[20];
2850}
2851
2852static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2853 void *context, int vl,
2854 int mode, u64 data)
2855{
2856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2857
2858 return dd->send_pio_err_status_cnt[19];
2859}
2860
2861static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2862 const struct cntr_entry *entry,
2863 void *context, int vl, int mode, u64 data)
2864{
2865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2866
2867 return dd->send_pio_err_status_cnt[18];
2868}
2869
2870static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2871 void *context, int vl, int mode,
2872 u64 data)
2873{
2874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2875
2876 return dd->send_pio_err_status_cnt[17];
2877}
2878
2879static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2880 void *context, int vl, int mode,
2881 u64 data)
2882{
2883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2884
2885 return dd->send_pio_err_status_cnt[16];
2886}
2887
2888static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2889 const struct cntr_entry *entry,
2890 void *context, int vl, int mode, u64 data)
2891{
2892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2893
2894 return dd->send_pio_err_status_cnt[15];
2895}
2896
2897static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2898 const struct cntr_entry *entry,
2899 void *context, int vl, int mode, u64 data)
2900{
2901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2902
2903 return dd->send_pio_err_status_cnt[14];
2904}
2905
2906static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2907 const struct cntr_entry *entry,
2908 void *context, int vl, int mode, u64 data)
2909{
2910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2911
2912 return dd->send_pio_err_status_cnt[13];
2913}
2914
2915static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2916 const struct cntr_entry *entry,
2917 void *context, int vl, int mode, u64 data)
2918{
2919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2920
2921 return dd->send_pio_err_status_cnt[12];
2922}
2923
2924static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2925 const struct cntr_entry *entry,
2926 void *context, int vl, int mode, u64 data)
2927{
2928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2929
2930 return dd->send_pio_err_status_cnt[11];
2931}
2932
2933static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2934 const struct cntr_entry *entry,
2935 void *context, int vl, int mode, u64 data)
2936{
2937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2938
2939 return dd->send_pio_err_status_cnt[10];
2940}
2941
2942static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2943 const struct cntr_entry *entry,
2944 void *context, int vl, int mode, u64 data)
2945{
2946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2947
2948 return dd->send_pio_err_status_cnt[9];
2949}
2950
2951static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2952 const struct cntr_entry *entry,
2953 void *context, int vl, int mode, u64 data)
2954{
2955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2956
2957 return dd->send_pio_err_status_cnt[8];
2958}
2959
2960static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2961 const struct cntr_entry *entry,
2962 void *context, int vl, int mode, u64 data)
2963{
2964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2965
2966 return dd->send_pio_err_status_cnt[7];
2967}
2968
2969static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2970 void *context, int vl, int mode,
2971 u64 data)
2972{
2973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2974
2975 return dd->send_pio_err_status_cnt[6];
2976}
2977
2978static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2979 void *context, int vl, int mode,
2980 u64 data)
2981{
2982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2983
2984 return dd->send_pio_err_status_cnt[5];
2985}
2986
2987static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2988 void *context, int vl, int mode,
2989 u64 data)
2990{
2991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2992
2993 return dd->send_pio_err_status_cnt[4];
2994}
2995
2996static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2997 void *context, int vl, int mode,
2998 u64 data)
2999{
3000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3001
3002 return dd->send_pio_err_status_cnt[3];
3003}
3004
3005static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3006 void *context, int vl, int mode,
3007 u64 data)
3008{
3009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3010
3011 return dd->send_pio_err_status_cnt[2];
3012}
3013
3014static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3015 void *context, int vl,
3016 int mode, u64 data)
3017{
3018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3019
3020 return dd->send_pio_err_status_cnt[1];
3021}
3022
3023static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3024 void *context, int vl, int mode,
3025 u64 data)
3026{
3027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3028
3029 return dd->send_pio_err_status_cnt[0];
3030}
3031
3032/*
3033 * Software counters corresponding to each of the
3034 * error status bits within SendDmaErrStatus
3035 */
3036static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3037 const struct cntr_entry *entry,
3038 void *context, int vl, int mode, u64 data)
3039{
3040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3041
3042 return dd->send_dma_err_status_cnt[3];
3043}
3044
3045static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3046 const struct cntr_entry *entry,
3047 void *context, int vl, int mode, u64 data)
3048{
3049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3050
3051 return dd->send_dma_err_status_cnt[2];
3052}
3053
3054static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3055 void *context, int vl, int mode,
3056 u64 data)
3057{
3058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3059
3060 return dd->send_dma_err_status_cnt[1];
3061}
3062
3063static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3064 void *context, int vl, int mode,
3065 u64 data)
3066{
3067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3068
3069 return dd->send_dma_err_status_cnt[0];
3070}
3071
3072/*
3073 * Software counters corresponding to each of the
3074 * error status bits within SendEgressErrStatus
3075 */
3076static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3077 const struct cntr_entry *entry,
3078 void *context, int vl, int mode, u64 data)
3079{
3080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3081
3082 return dd->send_egress_err_status_cnt[63];
3083}
3084
3085static u64 access_tx_read_sdma_memory_csr_err_cnt(
3086 const struct cntr_entry *entry,
3087 void *context, int vl, int mode, u64 data)
3088{
3089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3090
3091 return dd->send_egress_err_status_cnt[62];
3092}
3093
3094static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3095 void *context, int vl, int mode,
3096 u64 data)
3097{
3098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3099
3100 return dd->send_egress_err_status_cnt[61];
3101}
3102
3103static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3104 void *context, int vl,
3105 int mode, u64 data)
3106{
3107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3108
3109 return dd->send_egress_err_status_cnt[60];
3110}
3111
3112static u64 access_tx_read_sdma_memory_cor_err_cnt(
3113 const struct cntr_entry *entry,
3114 void *context, int vl, int mode, u64 data)
3115{
3116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3117
3118 return dd->send_egress_err_status_cnt[59];
3119}
3120
3121static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3122 void *context, int vl, int mode,
3123 u64 data)
3124{
3125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3126
3127 return dd->send_egress_err_status_cnt[58];
3128}
3129
3130static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3131 void *context, int vl, int mode,
3132 u64 data)
3133{
3134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3135
3136 return dd->send_egress_err_status_cnt[57];
3137}
3138
3139static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3140 void *context, int vl, int mode,
3141 u64 data)
3142{
3143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3144
3145 return dd->send_egress_err_status_cnt[56];
3146}
3147
3148static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3149 void *context, int vl, int mode,
3150 u64 data)
3151{
3152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3153
3154 return dd->send_egress_err_status_cnt[55];
3155}
3156
3157static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3158 void *context, int vl, int mode,
3159 u64 data)
3160{
3161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3162
3163 return dd->send_egress_err_status_cnt[54];
3164}
3165
3166static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3167 void *context, int vl, int mode,
3168 u64 data)
3169{
3170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3171
3172 return dd->send_egress_err_status_cnt[53];
3173}
3174
3175static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3176 void *context, int vl, int mode,
3177 u64 data)
3178{
3179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3180
3181 return dd->send_egress_err_status_cnt[52];
3182}
3183
3184static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3185 void *context, int vl, int mode,
3186 u64 data)
3187{
3188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3189
3190 return dd->send_egress_err_status_cnt[51];
3191}
3192
3193static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3194 void *context, int vl, int mode,
3195 u64 data)
3196{
3197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3198
3199 return dd->send_egress_err_status_cnt[50];
3200}
3201
3202static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3203 void *context, int vl, int mode,
3204 u64 data)
3205{
3206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3207
3208 return dd->send_egress_err_status_cnt[49];
3209}
3210
3211static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3212 void *context, int vl, int mode,
3213 u64 data)
3214{
3215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3216
3217 return dd->send_egress_err_status_cnt[48];
3218}
3219
3220static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3221 void *context, int vl, int mode,
3222 u64 data)
3223{
3224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3225
3226 return dd->send_egress_err_status_cnt[47];
3227}
3228
3229static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3230 void *context, int vl, int mode,
3231 u64 data)
3232{
3233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3234
3235 return dd->send_egress_err_status_cnt[46];
3236}
3237
3238static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3239 void *context, int vl, int mode,
3240 u64 data)
3241{
3242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3243
3244 return dd->send_egress_err_status_cnt[45];
3245}
3246
3247static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3248 void *context, int vl,
3249 int mode, u64 data)
3250{
3251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3252
3253 return dd->send_egress_err_status_cnt[44];
3254}
3255
3256static u64 access_tx_read_sdma_memory_unc_err_cnt(
3257 const struct cntr_entry *entry,
3258 void *context, int vl, int mode, u64 data)
3259{
3260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3261
3262 return dd->send_egress_err_status_cnt[43];
3263}
3264
3265static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3266 void *context, int vl, int mode,
3267 u64 data)
3268{
3269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3270
3271 return dd->send_egress_err_status_cnt[42];
3272}
3273
3274static u64 access_tx_credit_return_partiy_err_cnt(
3275 const struct cntr_entry *entry,
3276 void *context, int vl, int mode, u64 data)
3277{
3278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3279
3280 return dd->send_egress_err_status_cnt[41];
3281}
3282
3283static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3284 const struct cntr_entry *entry,
3285 void *context, int vl, int mode, u64 data)
3286{
3287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3288
3289 return dd->send_egress_err_status_cnt[40];
3290}
3291
3292static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3293 const struct cntr_entry *entry,
3294 void *context, int vl, int mode, u64 data)
3295{
3296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3297
3298 return dd->send_egress_err_status_cnt[39];
3299}
3300
3301static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3302 const struct cntr_entry *entry,
3303 void *context, int vl, int mode, u64 data)
3304{
3305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3306
3307 return dd->send_egress_err_status_cnt[38];
3308}
3309
3310static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3311 const struct cntr_entry *entry,
3312 void *context, int vl, int mode, u64 data)
3313{
3314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3315
3316 return dd->send_egress_err_status_cnt[37];
3317}
3318
3319static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3320 const struct cntr_entry *entry,
3321 void *context, int vl, int mode, u64 data)
3322{
3323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3324
3325 return dd->send_egress_err_status_cnt[36];
3326}
3327
3328static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3329 const struct cntr_entry *entry,
3330 void *context, int vl, int mode, u64 data)
3331{
3332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3333
3334 return dd->send_egress_err_status_cnt[35];
3335}
3336
3337static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3338 const struct cntr_entry *entry,
3339 void *context, int vl, int mode, u64 data)
3340{
3341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3342
3343 return dd->send_egress_err_status_cnt[34];
3344}
3345
3346static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3347 const struct cntr_entry *entry,
3348 void *context, int vl, int mode, u64 data)
3349{
3350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3351
3352 return dd->send_egress_err_status_cnt[33];
3353}
3354
3355static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3356 const struct cntr_entry *entry,
3357 void *context, int vl, int mode, u64 data)
3358{
3359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3360
3361 return dd->send_egress_err_status_cnt[32];
3362}
3363
3364static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3365 const struct cntr_entry *entry,
3366 void *context, int vl, int mode, u64 data)
3367{
3368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3369
3370 return dd->send_egress_err_status_cnt[31];
3371}
3372
3373static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3374 const struct cntr_entry *entry,
3375 void *context, int vl, int mode, u64 data)
3376{
3377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3378
3379 return dd->send_egress_err_status_cnt[30];
3380}
3381
3382static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3383 const struct cntr_entry *entry,
3384 void *context, int vl, int mode, u64 data)
3385{
3386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3387
3388 return dd->send_egress_err_status_cnt[29];
3389}
3390
3391static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3392 const struct cntr_entry *entry,
3393 void *context, int vl, int mode, u64 data)
3394{
3395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3396
3397 return dd->send_egress_err_status_cnt[28];
3398}
3399
3400static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3401 const struct cntr_entry *entry,
3402 void *context, int vl, int mode, u64 data)
3403{
3404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3405
3406 return dd->send_egress_err_status_cnt[27];
3407}
3408
3409static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3410 const struct cntr_entry *entry,
3411 void *context, int vl, int mode, u64 data)
3412{
3413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3414
3415 return dd->send_egress_err_status_cnt[26];
3416}
3417
3418static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3419 const struct cntr_entry *entry,
3420 void *context, int vl, int mode, u64 data)
3421{
3422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3423
3424 return dd->send_egress_err_status_cnt[25];
3425}
3426
3427static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3428 const struct cntr_entry *entry,
3429 void *context, int vl, int mode, u64 data)
3430{
3431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3432
3433 return dd->send_egress_err_status_cnt[24];
3434}
3435
3436static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3437 const struct cntr_entry *entry,
3438 void *context, int vl, int mode, u64 data)
3439{
3440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3441
3442 return dd->send_egress_err_status_cnt[23];
3443}
3444
3445static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3446 const struct cntr_entry *entry,
3447 void *context, int vl, int mode, u64 data)
3448{
3449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3450
3451 return dd->send_egress_err_status_cnt[22];
3452}
3453
3454static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3455 const struct cntr_entry *entry,
3456 void *context, int vl, int mode, u64 data)
3457{
3458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3459
3460 return dd->send_egress_err_status_cnt[21];
3461}
3462
3463static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3464 const struct cntr_entry *entry,
3465 void *context, int vl, int mode, u64 data)
3466{
3467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3468
3469 return dd->send_egress_err_status_cnt[20];
3470}
3471
3472static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3473 const struct cntr_entry *entry,
3474 void *context, int vl, int mode, u64 data)
3475{
3476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3477
3478 return dd->send_egress_err_status_cnt[19];
3479}
3480
3481static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3482 const struct cntr_entry *entry,
3483 void *context, int vl, int mode, u64 data)
3484{
3485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3486
3487 return dd->send_egress_err_status_cnt[18];
3488}
3489
3490static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3491 const struct cntr_entry *entry,
3492 void *context, int vl, int mode, u64 data)
3493{
3494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3495
3496 return dd->send_egress_err_status_cnt[17];
3497}
3498
3499static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3500 const struct cntr_entry *entry,
3501 void *context, int vl, int mode, u64 data)
3502{
3503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3504
3505 return dd->send_egress_err_status_cnt[16];
3506}
3507
3508static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3509 void *context, int vl, int mode,
3510 u64 data)
3511{
3512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3513
3514 return dd->send_egress_err_status_cnt[15];
3515}
3516
3517static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3518 void *context, int vl,
3519 int mode, u64 data)
3520{
3521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3522
3523 return dd->send_egress_err_status_cnt[14];
3524}
3525
3526static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3527 void *context, int vl, int mode,
3528 u64 data)
3529{
3530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3531
3532 return dd->send_egress_err_status_cnt[13];
3533}
3534
3535static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3536 void *context, int vl, int mode,
3537 u64 data)
3538{
3539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3540
3541 return dd->send_egress_err_status_cnt[12];
3542}
3543
3544static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3545 const struct cntr_entry *entry,
3546 void *context, int vl, int mode, u64 data)
3547{
3548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3549
3550 return dd->send_egress_err_status_cnt[11];
3551}
3552
3553static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3554 void *context, int vl, int mode,
3555 u64 data)
3556{
3557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3558
3559 return dd->send_egress_err_status_cnt[10];
3560}
3561
3562static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3563 void *context, int vl, int mode,
3564 u64 data)
3565{
3566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3567
3568 return dd->send_egress_err_status_cnt[9];
3569}
3570
3571static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3572 const struct cntr_entry *entry,
3573 void *context, int vl, int mode, u64 data)
3574{
3575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3576
3577 return dd->send_egress_err_status_cnt[8];
3578}
3579
3580static u64 access_tx_pio_launch_intf_parity_err_cnt(
3581 const struct cntr_entry *entry,
3582 void *context, int vl, int mode, u64 data)
3583{
3584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3585
3586 return dd->send_egress_err_status_cnt[7];
3587}
3588
3589static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3590 void *context, int vl, int mode,
3591 u64 data)
3592{
3593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3594
3595 return dd->send_egress_err_status_cnt[6];
3596}
3597
3598static u64 access_tx_incorrect_link_state_err_cnt(
3599 const struct cntr_entry *entry,
3600 void *context, int vl, int mode, u64 data)
3601{
3602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3603
3604 return dd->send_egress_err_status_cnt[5];
3605}
3606
3607static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3608 void *context, int vl, int mode,
3609 u64 data)
3610{
3611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3612
3613 return dd->send_egress_err_status_cnt[4];
3614}
3615
3616static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3617 const struct cntr_entry *entry,
3618 void *context, int vl, int mode, u64 data)
3619{
3620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3621
3622 return dd->send_egress_err_status_cnt[3];
3623}
3624
3625static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3626 void *context, int vl, int mode,
3627 u64 data)
3628{
3629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3630
3631 return dd->send_egress_err_status_cnt[2];
3632}
3633
3634static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3635 const struct cntr_entry *entry,
3636 void *context, int vl, int mode, u64 data)
3637{
3638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3639
3640 return dd->send_egress_err_status_cnt[1];
3641}
3642
3643static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3644 const struct cntr_entry *entry,
3645 void *context, int vl, int mode, u64 data)
3646{
3647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3648
3649 return dd->send_egress_err_status_cnt[0];
3650}
3651
3652/*
3653 * Software counters corresponding to each of the
3654 * error status bits within SendErrStatus
3655 */
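/* Only three bits are tracked here, in send_err_status_cnt[2..0]. */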
3656static u64 access_send_csr_write_bad_addr_err_cnt(
3657 const struct cntr_entry *entry,
3658 void *context, int vl, int mode, u64 data)
3659{
3660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3661
3662 return dd->send_err_status_cnt[2];
3663}
3664
3665static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3666 void *context, int vl,
3667 int mode, u64 data)
3668{
3669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3670
3671 return dd->send_err_status_cnt[1];
3672}
3673
3674static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3675 void *context, int vl, int mode,
3676 u64 data)
3677{
3678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3679
3680 return dd->send_err_status_cnt[0];
3681}
3682
3683/*
3684 * Software counters corresponding to each of the
3685 * error status bits within SendCtxtErrStatus
3686 */
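/* Per-send-context errors: five bits, tracked in sw_ctxt_err_status_cnt[4..0]. */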
3687static u64 access_pio_write_out_of_bounds_err_cnt(
3688 const struct cntr_entry *entry,
3689 void *context, int vl, int mode, u64 data)
3690{
3691 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3692
3693 return dd->sw_ctxt_err_status_cnt[4];
3694}
3695
3696static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3697 void *context, int vl, int mode,
3698 u64 data)
3699{
3700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3701
3702 return dd->sw_ctxt_err_status_cnt[3];
3703}
3704
3705static u64 access_pio_write_crosses_boundary_err_cnt(
3706 const struct cntr_entry *entry,
3707 void *context, int vl, int mode, u64 data)
3708{
3709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3710
3711 return dd->sw_ctxt_err_status_cnt[2];
3712}
3713
3714static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3715 void *context, int vl,
3716 int mode, u64 data)
3717{
3718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3719
3720 return dd->sw_ctxt_err_status_cnt[1];
3721}
3722
3723static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3724 void *context, int vl, int mode,
3725 u64 data)
3726{
3727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3728
3729 return dd->sw_ctxt_err_status_cnt[0];
3730}
3731
3732/*
3733 * Software counters corresponding to each of the
3734 * error status bits within SendDmaEngErrStatus
3735 */
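/*
 * As with the egress errors above, each SendDmaEngErrStatus bit maps to one
 * entry of sw_send_dma_eng_err_status_cnt[], from bit 23 down to 0.
 */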
3736static u64 access_sdma_header_request_fifo_cor_err_cnt(
3737 const struct cntr_entry *entry,
3738 void *context, int vl, int mode, u64 data)
3739{
3740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3741
3742 return dd->sw_send_dma_eng_err_status_cnt[23];
3743}
3744
3745static u64 access_sdma_header_storage_cor_err_cnt(
3746 const struct cntr_entry *entry,
3747 void *context, int vl, int mode, u64 data)
3748{
3749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3750
3751 return dd->sw_send_dma_eng_err_status_cnt[22];
3752}
3753
3754static u64 access_sdma_packet_tracking_cor_err_cnt(
3755 const struct cntr_entry *entry,
3756 void *context, int vl, int mode, u64 data)
3757{
3758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3759
3760 return dd->sw_send_dma_eng_err_status_cnt[21];
3761}
3762
3763static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3764 void *context, int vl, int mode,
3765 u64 data)
3766{
3767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3768
3769 return dd->sw_send_dma_eng_err_status_cnt[20];
3770}
3771
3772static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3773 void *context, int vl, int mode,
3774 u64 data)
3775{
3776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3777
3778 return dd->sw_send_dma_eng_err_status_cnt[19];
3779}
3780
3781static u64 access_sdma_header_request_fifo_unc_err_cnt(
3782 const struct cntr_entry *entry,
3783 void *context, int vl, int mode, u64 data)
3784{
3785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3786
3787 return dd->sw_send_dma_eng_err_status_cnt[18];
3788}
3789
3790static u64 access_sdma_header_storage_unc_err_cnt(
3791 const struct cntr_entry *entry,
3792 void *context, int vl, int mode, u64 data)
3793{
3794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3795
3796 return dd->sw_send_dma_eng_err_status_cnt[17];
3797}
3798
3799static u64 access_sdma_packet_tracking_unc_err_cnt(
3800 const struct cntr_entry *entry,
3801 void *context, int vl, int mode, u64 data)
3802{
3803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3804
3805 return dd->sw_send_dma_eng_err_status_cnt[16];
3806}
3807
3808static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3809 void *context, int vl, int mode,
3810 u64 data)
3811{
3812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3813
3814 return dd->sw_send_dma_eng_err_status_cnt[15];
3815}
3816
3817static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3818 void *context, int vl, int mode,
3819 u64 data)
3820{
3821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3822
3823 return dd->sw_send_dma_eng_err_status_cnt[14];
3824}
3825
3826static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3827 void *context, int vl, int mode,
3828 u64 data)
3829{
3830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3831
3832 return dd->sw_send_dma_eng_err_status_cnt[13];
3833}
3834
3835static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3836 void *context, int vl, int mode,
3837 u64 data)
3838{
3839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3840
3841 return dd->sw_send_dma_eng_err_status_cnt[12];
3842}
3843
3844static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3845 void *context, int vl, int mode,
3846 u64 data)
3847{
3848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3849
3850 return dd->sw_send_dma_eng_err_status_cnt[11];
3851}
3852
3853static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3854 void *context, int vl, int mode,
3855 u64 data)
3856{
3857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3858
3859 return dd->sw_send_dma_eng_err_status_cnt[10];
3860}
3861
3862static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3863 void *context, int vl, int mode,
3864 u64 data)
3865{
3866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3867
3868 return dd->sw_send_dma_eng_err_status_cnt[9];
3869}
3870
3871static u64 access_sdma_packet_desc_overflow_err_cnt(
3872 const struct cntr_entry *entry,
3873 void *context, int vl, int mode, u64 data)
3874{
3875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3876
3877 return dd->sw_send_dma_eng_err_status_cnt[8];
3878}
3879
3880static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3881 void *context, int vl,
3882 int mode, u64 data)
3883{
3884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3885
3886 return dd->sw_send_dma_eng_err_status_cnt[7];
3887}
3888
3889static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3890 void *context, int vl, int mode, u64 data)
3891{
3892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3893
3894 return dd->sw_send_dma_eng_err_status_cnt[6];
3895}
3896
3897static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3898 void *context, int vl, int mode,
3899 u64 data)
3900{
3901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3902
3903 return dd->sw_send_dma_eng_err_status_cnt[5];
3904}
3905
3906static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3907 void *context, int vl, int mode,
3908 u64 data)
3909{
3910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3911
3912 return dd->sw_send_dma_eng_err_status_cnt[4];
3913}
3914
3915static u64 access_sdma_tail_out_of_bounds_err_cnt(
3916 const struct cntr_entry *entry,
3917 void *context, int vl, int mode, u64 data)
3918{
3919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3920
3921 return dd->sw_send_dma_eng_err_status_cnt[3];
3922}
3923
3924static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3925 void *context, int vl, int mode,
3926 u64 data)
3927{
3928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3929
3930 return dd->sw_send_dma_eng_err_status_cnt[2];
3931}
3932
3933static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3934 void *context, int vl, int mode,
3935 u64 data)
3936{
3937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3938
3939 return dd->sw_send_dma_eng_err_status_cnt[1];
3940}
3941
3942static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3943 void *context, int vl, int mode,
3944 u64 data)
3945{
3946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3947
3948 return dd->sw_send_dma_eng_err_status_cnt[0];
3949}
3950
3951#define def_access_sw_cpu(cntr) \
3952static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3953 void *context, int vl, int mode, u64 data) \
3954{ \
3955 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3956	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3957 ppd->ibport_data.rvp.cntr, vl, \
3958			      mode, data); \
3959}
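
/*
 * For reference, def_access_sw_cpu(rc_acks) below expands to roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */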
3960
3961def_access_sw_cpu(rc_acks);
3962def_access_sw_cpu(rc_qacks);
3963def_access_sw_cpu(rc_delayed_comp);
3964
3965#define def_access_ibp_counter(cntr) \
3966static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3967 void *context, int vl, int mode, u64 data) \
3968{ \
3969 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3970 \
3971 if (vl != CNTR_INVALID_VL) \
3972 return 0; \
3973 \
3974	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3975			     mode, data); \
3976}
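
/*
 * These generate the per-port IB protocol counters (loop_pkts, rc_resends,
 * ...).  There is no per-VL breakdown, so a request for a specific VL
 * returns 0 and only the port-wide value is accessed via read_write_sw().
 */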
3977
3978def_access_ibp_counter(loop_pkts);
3979def_access_ibp_counter(rc_resends);
3980def_access_ibp_counter(rnr_naks);
3981def_access_ibp_counter(other_naks);
3982def_access_ibp_counter(rc_timeouts);
3983def_access_ibp_counter(pkt_drops);
3984def_access_ibp_counter(dmawait);
3985def_access_ibp_counter(rc_seqnak);
3986def_access_ibp_counter(rc_dupreq);
3987def_access_ibp_counter(rdma_seq);
3988def_access_ibp_counter(unaligned);
3989def_access_ibp_counter(seq_naks);
3990
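/*
 * Device-wide counter table, indexed by the C_* enum values via designated
 * initializers.  Hardware-backed entries name a CSR count register (for
 * example RCV_BUF_OVFL_CNT), while software-only entries pass zeros in
 * place of a CSR and supply one of the access_* callbacks defined above.
 */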
3991static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3992[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3993[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3994 CNTR_NORMAL),
3995[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3996 CNTR_NORMAL),
3997[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3998 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3999 CNTR_NORMAL),
4000[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4001 CNTR_NORMAL),
4002[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4003 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4004[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4005 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4006[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4013 CNTR_NORMAL),
4014[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4015 CNTR_NORMAL),
4016[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4017 CNTR_NORMAL),
4018[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4019 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4020[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4021 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4022[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4023 CNTR_SYNTH),
4024[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4025[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4026 CNTR_SYNTH),
4027[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4028 CNTR_SYNTH),
4029[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4030 CNTR_SYNTH),
4031[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4032 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4033[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4034 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4035 CNTR_SYNTH),
4036[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4037 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4038[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4039 CNTR_SYNTH),
4040[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4043 CNTR_SYNTH),
4044[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4045 CNTR_SYNTH),
4046[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4047 CNTR_SYNTH),
4048[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4049 CNTR_SYNTH),
4050[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4051 CNTR_SYNTH),
4052[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4053 CNTR_SYNTH | CNTR_VL),
4054[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4055 CNTR_SYNTH | CNTR_VL),
4056[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4057[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4058 CNTR_SYNTH | CNTR_VL),
4059[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4060[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4061 CNTR_SYNTH | CNTR_VL),
4062[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4063 CNTR_SYNTH),
4064[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4065 CNTR_SYNTH | CNTR_VL),
4066[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4067 CNTR_SYNTH),
4068[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4069 CNTR_SYNTH | CNTR_VL),
4070[C_DC_TOTAL_CRC] =
4071 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4074 CNTR_SYNTH),
4075[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4076 CNTR_SYNTH),
4077[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4078 CNTR_SYNTH),
4079[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4080 CNTR_SYNTH),
4081[C_DC_CRC_MULT_LN] =
4082 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4083 CNTR_SYNTH),
4084[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4085 CNTR_SYNTH),
4086[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4087 CNTR_SYNTH),
4088[C_DC_SEQ_CRC_CNT] =
4089 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4090 CNTR_SYNTH),
4091[C_DC_ESC0_ONLY_CNT] =
4092 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4093 CNTR_SYNTH),
4094[C_DC_ESC0_PLUS1_CNT] =
4095 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4096 CNTR_SYNTH),
4097[C_DC_ESC0_PLUS2_CNT] =
4098 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4099 CNTR_SYNTH),
4100[C_DC_REINIT_FROM_PEER_CNT] =
4101 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4102 CNTR_SYNTH),
4103[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4104 CNTR_SYNTH),
4105[C_DC_MISC_FLG_CNT] =
4106 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4107 CNTR_SYNTH),
4108[C_DC_PRF_GOOD_LTP_CNT] =
4109 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4110[C_DC_PRF_ACCEPTED_LTP_CNT] =
4111 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4112 CNTR_SYNTH),
4113[C_DC_PRF_RX_FLIT_CNT] =
4114 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4115[C_DC_PRF_TX_FLIT_CNT] =
4116 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4117[C_DC_PRF_CLK_CNTR] =
4118 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4119[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4120 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4121[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4122 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4123 CNTR_SYNTH),
4124[C_DC_PG_STS_TX_SBE_CNT] =
4125 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4126[C_DC_PG_STS_TX_MBE_CNT] =
4127 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4128 CNTR_SYNTH),
4129[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4130 access_sw_cpu_intr),
4131[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4132 access_sw_cpu_rcv_limit),
4133[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4134 access_sw_vtx_wait),
4135[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4136 access_sw_pio_wait),
4137[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4138 access_sw_pio_drain),
4139[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4140 access_sw_kmem_wait),
4141[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4142 access_sw_send_schedule),
4143[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4144 SEND_DMA_DESC_FETCHED_CNT, 0,
4145 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4146 dev_access_u32_csr),
4147[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4148 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4149 access_sde_int_cnt),
4150[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4151 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4152 access_sde_err_cnt),
4153[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4154 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4155 access_sde_idle_int_cnt),
4156[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4157 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4158 access_sde_progress_int_cnt),
4159/* MISC_ERR_STATUS */
4160[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4161 CNTR_NORMAL,
4162 access_misc_pll_lock_fail_err_cnt),
4163[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_mbist_fail_err_cnt),
4166[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4167 CNTR_NORMAL,
4168 access_misc_invalid_eep_cmd_err_cnt),
4169[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_efuse_done_parity_err_cnt),
4172[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_efuse_write_err_cnt),
4175[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4176 0, CNTR_NORMAL,
4177 access_misc_efuse_read_bad_addr_err_cnt),
4178[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4179 CNTR_NORMAL,
4180 access_misc_efuse_csr_parity_err_cnt),
4181[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4182 CNTR_NORMAL,
4183 access_misc_fw_auth_failed_err_cnt),
4184[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4185 CNTR_NORMAL,
4186 access_misc_key_mismatch_err_cnt),
4187[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4188 CNTR_NORMAL,
4189 access_misc_sbus_write_failed_err_cnt),
4190[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4191 CNTR_NORMAL,
4192 access_misc_csr_write_bad_addr_err_cnt),
4193[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4194 CNTR_NORMAL,
4195 access_misc_csr_read_bad_addr_err_cnt),
4196[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4197 CNTR_NORMAL,
4198 access_misc_csr_parity_err_cnt),
4199/* CceErrStatus */
4200[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4201 CNTR_NORMAL,
4202 access_sw_cce_err_status_aggregated_cnt),
4203[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4204 CNTR_NORMAL,
4205 access_cce_msix_csr_parity_err_cnt),
4206[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_int_map_unc_err_cnt),
4209[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4210 CNTR_NORMAL,
4211 access_cce_int_map_cor_err_cnt),
4212[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4213 CNTR_NORMAL,
4214 access_cce_msix_table_unc_err_cnt),
4215[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_cce_msix_table_cor_err_cnt),
4218[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4219 0, CNTR_NORMAL,
4220 access_cce_rxdma_conv_fifo_parity_err_cnt),
4221[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4222 0, CNTR_NORMAL,
4223 access_cce_rcpl_async_fifo_parity_err_cnt),
4224[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4225 CNTR_NORMAL,
4226 access_cce_seg_write_bad_addr_err_cnt),
4227[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_cce_seg_read_bad_addr_err_cnt),
4230[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4231 CNTR_NORMAL,
4232 access_la_triggered_cnt),
4233[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4234 CNTR_NORMAL,
4235 access_cce_trgt_cpl_timeout_err_cnt),
4236[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_pcic_receive_parity_err_cnt),
4239[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_transmit_back_parity_err_cnt),
4242[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4243 0, CNTR_NORMAL,
4244 access_pcic_transmit_front_parity_err_cnt),
4245[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_pcic_cpl_dat_q_unc_err_cnt),
4248[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_cpl_hd_q_unc_err_cnt),
4251[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_post_dat_q_unc_err_cnt),
4254[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_post_hd_q_unc_err_cnt),
4257[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_retry_sot_mem_unc_err_cnt),
4260[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_retry_mem_unc_err),
4263[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_n_post_dat_q_parity_err_cnt),
4266[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_pcic_n_post_h_q_parity_err_cnt),
4269[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_pcic_cpl_dat_q_cor_err_cnt),
4272[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_pcic_cpl_hd_q_cor_err_cnt),
4275[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_pcic_post_dat_q_cor_err_cnt),
4278[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_pcic_post_hd_q_cor_err_cnt),
4281[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4282 CNTR_NORMAL,
4283 access_pcic_retry_sot_mem_cor_err_cnt),
4284[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4285 CNTR_NORMAL,
4286 access_pcic_retry_mem_cor_err_cnt),
4287[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4288 "CceCli1AsyncFifoDbgParityError", 0, 0,
4289 CNTR_NORMAL,
4290 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4291[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4292 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4293 CNTR_NORMAL,
4294 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4295 ),
4296[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4297 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4298 CNTR_NORMAL,
4299 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4300[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4301 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4304[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4305 0, CNTR_NORMAL,
4306 access_cce_cli2_async_fifo_parity_err_cnt),
4307[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_cce_csr_cfg_bus_parity_err_cnt),
4310[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4311 0, CNTR_NORMAL,
4312 access_cce_cli0_async_fifo_parity_err_cnt),
4313[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_cce_rspd_data_parity_err_cnt),
4316[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_cce_trgt_access_err_cnt),
4319[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4320 0, CNTR_NORMAL,
4321 access_cce_trgt_async_fifo_parity_err_cnt),
4322[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4323 CNTR_NORMAL,
4324 access_cce_csr_write_bad_addr_err_cnt),
4325[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_cce_csr_read_bad_addr_err_cnt),
4328[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_ccs_csr_parity_err_cnt),
4331
4332/* RcvErrStatus */
4333[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_csr_parity_err_cnt),
4336[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_csr_write_bad_addr_err_cnt),
4339[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_csr_read_bad_addr_err_cnt),
4342[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_dma_csr_unc_err_cnt),
4345[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_dma_dq_fsm_encoding_err_cnt),
4348[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_dma_eq_fsm_encoding_err_cnt),
4351[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_dma_csr_parity_err_cnt),
4354[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_rbuf_data_cor_err_cnt),
4357[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_rbuf_data_unc_err_cnt),
4360[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_dma_data_fifo_rd_cor_err_cnt),
4363[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_dma_data_fifo_rd_unc_err_cnt),
4366[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4369[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4372[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_rbuf_desc_part2_cor_err_cnt),
4375[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_rbuf_desc_part2_unc_err_cnt),
4378[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_rbuf_desc_part1_cor_err_cnt),
4381[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_rx_rbuf_desc_part1_unc_err_cnt),
4384[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_hq_intr_fsm_err_cnt),
4387[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_hq_intr_csr_parity_err_cnt),
4390[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_lookup_csr_parity_err_cnt),
4393[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rx_lookup_rcv_array_cor_err_cnt),
4396[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_lookup_rcv_array_unc_err_cnt),
4399[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4400 0, CNTR_NORMAL,
4401 access_rx_lookup_des_part2_parity_err_cnt),
4402[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4403 0, CNTR_NORMAL,
4404 access_rx_lookup_des_part1_unc_cor_err_cnt),
4405[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_lookup_des_part1_unc_err_cnt),
4408[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_rbuf_next_free_buf_cor_err_cnt),
4411[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rx_rbuf_next_free_buf_unc_err_cnt),
4414[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4415 "RxRbufFlInitWrAddrParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4418[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4419 0, CNTR_NORMAL,
4420 access_rx_rbuf_fl_initdone_parity_err_cnt),
4421[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4422 0, CNTR_NORMAL,
4423 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4424[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4425 CNTR_NORMAL,
4426 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4427[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rx_rbuf_empty_err_cnt),
4430[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rx_rbuf_full_err_cnt),
4433[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4434 CNTR_NORMAL,
4435 access_rbuf_bad_lookup_err_cnt),
4436[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4437 CNTR_NORMAL,
4438 access_rbuf_ctx_id_parity_err_cnt),
4439[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4440 CNTR_NORMAL,
4441 access_rbuf_csr_qeopdw_parity_err_cnt),
4442[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4443 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4444 CNTR_NORMAL,
4445 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4446[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4447 "RxRbufCsrQTlPtrParityErr", 0, 0,
4448 CNTR_NORMAL,
4449 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4450[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4451 0, CNTR_NORMAL,
4452 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4453[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4454 0, CNTR_NORMAL,
4455 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4456[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4457 0, 0, CNTR_NORMAL,
4458 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4459[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4460 0, CNTR_NORMAL,
4461 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4462[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4463 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4466[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4467 0, CNTR_NORMAL,
4468 access_rx_rbuf_block_list_read_cor_err_cnt),
4469[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4470 0, CNTR_NORMAL,
4471 access_rx_rbuf_block_list_read_unc_err_cnt),
4472[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_rbuf_lookup_des_cor_err_cnt),
4475[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_rbuf_lookup_des_unc_err_cnt),
4478[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4479 "RxRbufLookupDesRegUncCorErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4482[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4485[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rbuf_free_list_cor_err_cnt),
4488[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rbuf_free_list_unc_err_cnt),
4491[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rcv_fsm_encoding_err_cnt),
4494[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_dma_flag_cor_err_cnt),
4497[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_dma_flag_unc_err_cnt),
4500[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_dc_sop_eop_parity_err_cnt),
4503[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_rcv_csr_parity_err_cnt),
4506[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rcv_qp_map_table_cor_err_cnt),
4509[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rcv_qp_map_table_unc_err_cnt),
4512[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_rx_rcv_data_cor_err_cnt),
4515[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_rcv_data_unc_err_cnt),
4518[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_rcv_hdr_cor_err_cnt),
4521[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4522 CNTR_NORMAL,
4523 access_rx_rcv_hdr_unc_err_cnt),
4524[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4525 CNTR_NORMAL,
4526 access_rx_dc_intf_parity_err_cnt),
4527[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_rx_dma_csr_cor_err_cnt),
4530/* SendPioErrStatus */
4531[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_pio_pec_sop_head_parity_err_cnt),
4534[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_pcc_sop_head_parity_err_cnt),
4537[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4538 0, 0, CNTR_NORMAL,
4539 access_pio_last_returned_cnt_parity_err_cnt),
4540[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4541 0, CNTR_NORMAL,
4542 access_pio_current_free_cnt_parity_err_cnt),
4543[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4544 CNTR_NORMAL,
4545 access_pio_reserved_31_err_cnt),
4546[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_reserved_30_err_cnt),
4549[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_ppmc_sop_len_err_cnt),
4552[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_ppmc_bqc_mem_parity_err_cnt),
4555[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_vl_fifo_parity_err_cnt),
4558[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_vlf_sop_parity_err_cnt),
4561[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_vlf_v1_len_parity_err_cnt),
4564[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_block_qw_count_parity_err_cnt),
4567[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_write_qw_valid_parity_err_cnt),
4570[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4571 CNTR_NORMAL,
4572 access_pio_state_machine_err_cnt),
4573[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_write_data_parity_err_cnt),
4576[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_host_addr_mem_cor_err_cnt),
4579[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_host_addr_mem_unc_err_cnt),
4582[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4585[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_init_sm_in_err_cnt),
4588[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_ppmc_pbl_fifo_err_cnt),
4591[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4592 0, CNTR_NORMAL,
4593 access_pio_credit_ret_fifo_parity_err_cnt),
4594[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_v1_len_mem_bank1_cor_err_cnt),
4597[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_v1_len_mem_bank0_cor_err_cnt),
4600[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_v1_len_mem_bank1_unc_err_cnt),
4603[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_v1_len_mem_bank0_unc_err_cnt),
4606[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_sm_pkt_reset_parity_err_cnt),
4609[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_pkt_evict_fifo_parity_err_cnt),
4612[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4613 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4616[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_sbrdctl_crrel_parity_err_cnt),
4619[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_pec_fifo_parity_err_cnt),
4622[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_pio_pcc_fifo_parity_err_cnt),
4625[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_sb_mem_fifo1_err_cnt),
4628[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_sb_mem_fifo0_err_cnt),
4631[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4632 CNTR_NORMAL,
4633 access_pio_csr_parity_err_cnt),
4634[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_pio_write_addr_parity_err_cnt),
4637[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4638 CNTR_NORMAL,
4639 access_pio_write_bad_ctxt_err_cnt),
4640/* SendDmaErrStatus */
4641[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4642 0, CNTR_NORMAL,
4643 access_sdma_pcie_req_tracking_cor_err_cnt),
4644[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4645 0, CNTR_NORMAL,
4646 access_sdma_pcie_req_tracking_unc_err_cnt),
4647[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_sdma_csr_parity_err_cnt),
4650[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_sdma_rpy_tag_err_cnt),
4653/* SendEgressErrStatus */
4654[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4655 CNTR_NORMAL,
4656 access_tx_read_pio_memory_csr_unc_err_cnt),
4657[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4658 0, CNTR_NORMAL,
4659 access_tx_read_sdma_memory_csr_err_cnt),
4660[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_tx_egress_fifo_cor_err_cnt),
4663[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_read_pio_memory_cor_err_cnt),
4666[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_read_sdma_memory_cor_err_cnt),
4669[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_sb_hdr_cor_err_cnt),
4672[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_credit_overrun_err_cnt),
4675[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_launch_fifo8_cor_err_cnt),
4678[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo7_cor_err_cnt),
4681[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_launch_fifo6_cor_err_cnt),
4684[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_launch_fifo5_cor_err_cnt),
4687[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_launch_fifo4_cor_err_cnt),
4690[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_launch_fifo3_cor_err_cnt),
4693[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_launch_fifo2_cor_err_cnt),
4696[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_launch_fifo1_cor_err_cnt),
4699[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_tx_launch_fifo0_cor_err_cnt),
4702[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4703 CNTR_NORMAL,
4704 access_tx_credit_return_vl_err_cnt),
4705[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4706 CNTR_NORMAL,
4707 access_tx_hcrc_insertion_err_cnt),
4708[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4709 CNTR_NORMAL,
4710 access_tx_egress_fifo_unc_err_cnt),
4711[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4712 CNTR_NORMAL,
4713 access_tx_read_pio_memory_unc_err_cnt),
4714[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4715 CNTR_NORMAL,
4716 access_tx_read_sdma_memory_unc_err_cnt),
4717[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4718 CNTR_NORMAL,
4719 access_tx_sb_hdr_unc_err_cnt),
4720[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4721 CNTR_NORMAL,
4722 access_tx_credit_return_partiy_err_cnt),
4723[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4726[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4729[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4732[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4735[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4738[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4741[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4744[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4747[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4750[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_sdma15_disallowed_packet_err_cnt),
4753[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma14_disallowed_packet_err_cnt),
4756[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma13_disallowed_packet_err_cnt),
4759[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma12_disallowed_packet_err_cnt),
4762[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma11_disallowed_packet_err_cnt),
4765[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma10_disallowed_packet_err_cnt),
4768[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma9_disallowed_packet_err_cnt),
4771[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma8_disallowed_packet_err_cnt),
4774[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma7_disallowed_packet_err_cnt),
4777[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4778 0, 0, CNTR_NORMAL,
4779 access_tx_sdma6_disallowed_packet_err_cnt),
4780[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4781 0, 0, CNTR_NORMAL,
4782 access_tx_sdma5_disallowed_packet_err_cnt),
4783[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4784 0, 0, CNTR_NORMAL,
4785 access_tx_sdma4_disallowed_packet_err_cnt),
4786[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_sdma3_disallowed_packet_err_cnt),
4789[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4790 0, 0, CNTR_NORMAL,
4791 access_tx_sdma2_disallowed_packet_err_cnt),
4792[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4793 0, 0, CNTR_NORMAL,
4794 access_tx_sdma1_disallowed_packet_err_cnt),
4795[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4796 0, 0, CNTR_NORMAL,
4797 access_tx_sdma0_disallowed_packet_err_cnt),
4798[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4799 CNTR_NORMAL,
4800 access_tx_config_parity_err_cnt),
4801[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4802 CNTR_NORMAL,
4803 access_tx_sbrd_ctl_csr_parity_err_cnt),
4804[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4805 CNTR_NORMAL,
4806 access_tx_launch_csr_parity_err_cnt),
4807[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4808 CNTR_NORMAL,
4809 access_tx_illegal_vl_err_cnt),
4810[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4811 "TxSbrdCtlStateMachineParityErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4814[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4815 CNTR_NORMAL,
4816 access_egress_reserved_10_err_cnt),
4817[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_9_err_cnt),
4820[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4821 0, 0, CNTR_NORMAL,
4822 access_tx_sdma_launch_intf_parity_err_cnt),
4823[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_pio_launch_intf_parity_err_cnt),
4826[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4827 CNTR_NORMAL,
4828 access_egress_reserved_6_err_cnt),
4829[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_incorrect_link_state_err_cnt),
4832[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_linkdown_err_cnt),
4835[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4836 "EgressFifoUnderrunOrParityErr", 0, 0,
4837 CNTR_NORMAL,
4838 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4839[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4840 CNTR_NORMAL,
4841 access_egress_reserved_2_err_cnt),
4842[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4843 CNTR_NORMAL,
4844 access_tx_pkt_integrity_mem_unc_err_cnt),
4845[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4846 CNTR_NORMAL,
4847 access_tx_pkt_integrity_mem_cor_err_cnt),
4848/* SendErrStatus */
4849[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4850 CNTR_NORMAL,
4851 access_send_csr_write_bad_addr_err_cnt),
4852[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4853 CNTR_NORMAL,
4854 access_send_csr_read_bad_addr_err_cnt),
4855[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4856 CNTR_NORMAL,
4857 access_send_csr_parity_cnt),
4858/* SendCtxtErrStatus */
4859[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_pio_write_out_of_bounds_err_cnt),
4862[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_pio_write_overflow_err_cnt),
4865[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4866 0, 0, CNTR_NORMAL,
4867 access_pio_write_crosses_boundary_err_cnt),
4868[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4869 CNTR_NORMAL,
4870 access_pio_disallowed_packet_err_cnt),
4871[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_pio_inconsistent_sop_err_cnt),
4874/* SendDmaEngErrStatus */
4875[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4876 0, 0, CNTR_NORMAL,
4877 access_sdma_header_request_fifo_cor_err_cnt),
4878[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_sdma_header_storage_cor_err_cnt),
4881[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_sdma_packet_tracking_cor_err_cnt),
4884[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_assembly_cor_err_cnt),
4887[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_desc_table_cor_err_cnt),
4890[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4891 0, 0, CNTR_NORMAL,
4892 access_sdma_header_request_fifo_unc_err_cnt),
4893[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_sdma_header_storage_unc_err_cnt),
4896[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_packet_tracking_unc_err_cnt),
4899[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_assembly_unc_err_cnt),
4902[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_desc_table_unc_err_cnt),
4905[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_timeout_err_cnt),
4908[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_header_length_err_cnt),
4911[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_header_address_err_cnt),
4914[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_header_select_err_cnt),
4917[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_reserved_9_err_cnt),
4920[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_packet_desc_overflow_err_cnt),
4923[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_length_mismatch_err_cnt),
4926[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4927 CNTR_NORMAL,
4928 access_sdma_halt_err_cnt),
4929[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4930 CNTR_NORMAL,
4931 access_sdma_mem_read_err_cnt),
4932[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_sdma_first_desc_err_cnt),
4935[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4936 CNTR_NORMAL,
4937 access_sdma_tail_out_of_bounds_err_cnt),
4938[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4939 CNTR_NORMAL,
4940 access_sdma_too_long_err_cnt),
4941[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_sdma_gen_mismatch_err_cnt),
4944[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004947};
4948
4949static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4950[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4951 CNTR_NORMAL),
4952[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4953 CNTR_NORMAL),
4954[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4955 CNTR_NORMAL),
4956[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4957 CNTR_NORMAL),
4958[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4959 CNTR_NORMAL),
4960[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4961 CNTR_NORMAL),
4962[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4963 CNTR_NORMAL),
4964[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4965[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4966[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4967[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4968 CNTR_SYNTH | CNTR_VL),
4969[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4970 CNTR_SYNTH | CNTR_VL),
4971[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4972 CNTR_SYNTH | CNTR_VL),
4973[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4974[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4975[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4976 access_sw_link_dn_cnt),
4977[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4978 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004979[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4980 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004981[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4982 access_sw_xmit_discards),
4983[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4984 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4985 access_sw_xmit_discards),
4986[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4987 access_xmit_constraint_errs),
4988[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4989 access_rcv_constraint_errs),
4990[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4991[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4992[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4993[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4994[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4995[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4996[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4997[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4998[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4999[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5000[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5001[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5002[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5003 access_sw_cpu_rc_acks),
5004[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5005 access_sw_cpu_rc_qacks),
5006[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5007 access_sw_cpu_rc_delayed_comp),
5008[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5009[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5010[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5011[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5012[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5013[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5014[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5015[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5016[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5017[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5018[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5019[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5020[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5021[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5022[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5023[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5024[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5025[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5026[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5027[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5028[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5029[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5030[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5031[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5032[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5033[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5034[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5035[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5036[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5037[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5038[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5039[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5040[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5041[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5042[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5043[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5044[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5045[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5046[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5047[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5048[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5049[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5050[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5051[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5052[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5053[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5054[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5055[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5056[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5057[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5058[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5059[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5060[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5061[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5062[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5063[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5064[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5065[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5066[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5067[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5068[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5069[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5070[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5071[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5072[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5073[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5074[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5075[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5076[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5077[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5078[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5079[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5080[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5081[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5082[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5083[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5084[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5085[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5086[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5087[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5088};
5089
5090/* ======================================================================== */
5091
Mike Marciniszyn77241052015-07-30 15:17:43 -04005092/* return true if this is chip revision a */
5093int is_ax(struct hfi1_devdata *dd)
5094{
5095 u8 chip_rev_minor =
5096 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5097 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5098 return (chip_rev_minor & 0xf0) == 0;
5099}
5100
 5101/* return true if this is chip revision b */
5102int is_bx(struct hfi1_devdata *dd)
5103{
5104 u8 chip_rev_minor =
5105 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5106 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005107 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005108}
5109
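/*
 * Illustrative decode of the two checks above; not part of the driver.
 * The shift and mask names are the ones used in is_ax()/is_bx(), and the
 * assumption that the upper nibble of the minor revision selects the
 * silicon step follows directly from the 0xf0 tests above.
 *
 *	u8 minor = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
 *			& CCE_REVISION_CHIP_REV_MINOR_MASK;
 *
 *	(minor & 0xf0) == 0x00  ->  is_ax() returns true (A-step part)
 *	(minor & 0xf0) == 0x10  ->  is_bx() returns true (B-step part)
 */
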
5110/*
 5111 * Append string s to buffer buf. Arguments curp and lenp point to the current
5112 * position and remaining length, respectively.
5113 *
5114 * return 0 on success, 1 on out of room
5115 */
5116static int append_str(char *buf, char **curp, int *lenp, const char *s)
5117{
5118 char *p = *curp;
5119 int len = *lenp;
5120 int result = 0; /* success */
5121 char c;
5122
 5123 /* add a comma, if not first in the buffer */
5124 if (p != buf) {
5125 if (len == 0) {
5126 result = 1; /* out of room */
5127 goto done;
5128 }
5129 *p++ = ',';
5130 len--;
5131 }
5132
5133 /* copy the string */
5134 while ((c = *s++) != 0) {
5135 if (len == 0) {
5136 result = 1; /* out of room */
5137 goto done;
5138 }
5139 *p++ = c;
5140 len--;
5141 }
5142
5143done:
5144 /* write return values */
5145 *curp = p;
5146 *lenp = len;
5147
5148 return result;
5149}
5150
5151/*
5152 * Using the given flag table, print a comma separated string into
5153 * the buffer. End in '*' if the buffer is too short.
5154 */
5155static char *flag_string(char *buf, int buf_len, u64 flags,
5156 struct flag_table *table, int table_size)
5157{
5158 char extra[32];
5159 char *p = buf;
5160 int len = buf_len;
5161 int no_room = 0;
5162 int i;
5163
 5164 /* make sure there are at least 2 bytes so we can form "*" */
5165 if (len < 2)
5166 return "";
5167
5168 len--; /* leave room for a nul */
5169 for (i = 0; i < table_size; i++) {
5170 if (flags & table[i].flag) {
5171 no_room = append_str(buf, &p, &len, table[i].str);
5172 if (no_room)
5173 break;
5174 flags &= ~table[i].flag;
5175 }
5176 }
5177
5178 /* any undocumented bits left? */
5179 if (!no_room && flags) {
5180 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5181 no_room = append_str(buf, &p, &len, extra);
5182 }
5183
5184 /* add * if ran out of room */
5185 if (no_room) {
5186 /* may need to back up to add space for a '*' */
5187 if (len == 0)
5188 --p;
5189 *p++ = '*';
5190 }
5191
5192 /* add final nul - space already allocated above */
5193 *p = 0;
5194 return buf;
5195}
5196
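/*
 * Usage sketch for flag_string(); illustrative only.  The table and the
 * register value are made up, while .flag and .str are the flag_table
 * members referenced in the loop above.
 *
 *	static struct flag_table demo_flags[] = {
 *		{ .flag = 0x1ull, .str = "BitZeroErr" },
 *		{ .flag = 0x4ull, .str = "BitTwoErr" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x7, demo_flags,
 *		    ARRAY_SIZE(demo_flags));
 *
 * fills buf with "BitZeroErr,BitTwoErr,bits 0x2": the undocumented bit 1
 * is reported by the catch-all, and a too-small buffer would end in '*'.
 */
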
5197/* first 8 CCE error interrupt source names */
5198static const char * const cce_misc_names[] = {
5199 "CceErrInt", /* 0 */
5200 "RxeErrInt", /* 1 */
5201 "MiscErrInt", /* 2 */
5202 "Reserved3", /* 3 */
5203 "PioErrInt", /* 4 */
5204 "SDmaErrInt", /* 5 */
5205 "EgressErrInt", /* 6 */
5206 "TxeErrInt" /* 7 */
5207};
5208
5209/*
5210 * Return the miscellaneous error interrupt name.
5211 */
5212static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5213{
5214 if (source < ARRAY_SIZE(cce_misc_names))
5215 strncpy(buf, cce_misc_names[source], bsize);
5216 else
5217 snprintf(buf,
5218 bsize,
5219 "Reserved%u",
5220 source + IS_GENERAL_ERR_START);
5221
5222 return buf;
5223}
5224
5225/*
5226 * Return the SDMA engine error interrupt name.
5227 */
5228static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5229{
5230 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5231 return buf;
5232}
5233
5234/*
5235 * Return the send context error interrupt name.
5236 */
5237static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5238{
5239 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5240 return buf;
5241}
5242
5243static const char * const various_names[] = {
5244 "PbcInt",
5245 "GpioAssertInt",
5246 "Qsfp1Int",
5247 "Qsfp2Int",
5248 "TCritInt"
5249};
5250
5251/*
5252 * Return the various interrupt name.
5253 */
5254static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5255{
5256 if (source < ARRAY_SIZE(various_names))
5257 strncpy(buf, various_names[source], bsize);
5258 else
Jubin John8638b772016-02-14 20:19:24 -08005259 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005260 return buf;
5261}
5262
5263/*
5264 * Return the DC interrupt name.
5265 */
5266static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5267{
5268 static const char * const dc_int_names[] = {
5269 "common",
5270 "lcb",
5271 "8051",
5272 "lbm" /* local block merge */
5273 };
5274
5275 if (source < ARRAY_SIZE(dc_int_names))
5276 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5277 else
5278 snprintf(buf, bsize, "DCInt%u", source);
5279 return buf;
5280}
5281
5282static const char * const sdma_int_names[] = {
5283 "SDmaInt",
5284 "SdmaIdleInt",
5285 "SdmaProgressInt",
5286};
5287
5288/*
5289 * Return the SDMA engine interrupt name.
5290 */
5291static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5292{
5293 /* what interrupt */
5294 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5295 /* which engine */
5296 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5297
5298 if (likely(what < 3))
5299 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5300 else
5301 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5302 return buf;
5303}
5304
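/*
 * Worked example for the decode above; the source value is made up and
 * TXE_NUM_SDMA_ENGINES is assumed to be 16, as the per-engine comments
 * elsewhere in this file indicate.
 *
 *	source = 17
 *	what   = 17 / 16 = 1	-> sdma_int_names[1] = "SdmaIdleInt"
 *	which  = 17 % 16 = 1
 *
 * so the buffer is filled with "SdmaIdleInt1".
 */
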
5305/*
5306 * Return the receive available interrupt name.
5307 */
5308static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5309{
5310 snprintf(buf, bsize, "RcvAvailInt%u", source);
5311 return buf;
5312}
5313
5314/*
5315 * Return the receive urgent interrupt name.
5316 */
5317static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5318{
5319 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5320 return buf;
5321}
5322
5323/*
5324 * Return the send credit interrupt name.
5325 */
5326static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5327{
5328 snprintf(buf, bsize, "SendCreditInt%u", source);
5329 return buf;
5330}
5331
5332/*
5333 * Return the reserved interrupt name.
5334 */
5335static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5336{
5337 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5338 return buf;
5339}
5340
5341static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5342{
5343 return flag_string(buf, buf_len, flags,
5344 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5345}
5346
5347static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5348{
5349 return flag_string(buf, buf_len, flags,
5350 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5351}
5352
5353static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5354{
5355 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5356 ARRAY_SIZE(misc_err_status_flags));
5357}
5358
5359static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5360{
5361 return flag_string(buf, buf_len, flags,
5362 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5363}
5364
5365static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366{
5367 return flag_string(buf, buf_len, flags,
5368 sdma_err_status_flags,
5369 ARRAY_SIZE(sdma_err_status_flags));
5370}
5371
5372static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373{
5374 return flag_string(buf, buf_len, flags,
5375 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5376}
5377
5378static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5379{
5380 return flag_string(buf, buf_len, flags,
5381 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5382}
5383
5384static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5385{
5386 return flag_string(buf, buf_len, flags,
5387 send_err_status_flags,
5388 ARRAY_SIZE(send_err_status_flags));
5389}
5390
5391static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5392{
5393 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005394 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005395
5396 /*
 5397 * For most of these errors, there is nothing that can be done except
5398 * report or record it.
5399 */
5400 dd_dev_info(dd, "CCE Error: %s\n",
5401 cce_err_status_string(buf, sizeof(buf), reg));
5402
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005403 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5404 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005405 /* this error requires a manual drop into SPC freeze mode */
5406 /* then a fix up */
5407 start_freeze_handling(dd->pport, FREEZE_SELF);
5408 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005409
5410 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5411 if (reg & (1ull << i)) {
5412 incr_cntr64(&dd->cce_err_status_cnt[i]);
5413 /* maintain a counter over all cce_err_status errors */
5414 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5415 }
5416 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005417}
5418
5419/*
5420 * Check counters for receive errors that do not have an interrupt
5421 * associated with them.
5422 */
5423#define RCVERR_CHECK_TIME 10
5424static void update_rcverr_timer(unsigned long opaque)
5425{
5426 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5427 struct hfi1_pportdata *ppd = dd->pport;
5428 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5429
5430 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5431 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5432 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5433 set_link_down_reason(ppd,
5434 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5435 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5436 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5437 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005438 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005439
5440 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5441}
5442
5443static int init_rcverr(struct hfi1_devdata *dd)
5444{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305445 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005446 /* Assume the hardware counter has been reset */
5447 dd->rcv_ovfl_cnt = 0;
5448 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5449}
5450
5451static void free_rcverr(struct hfi1_devdata *dd)
5452{
5453 if (dd->rcverr_timer.data)
5454 del_timer_sync(&dd->rcverr_timer);
5455 dd->rcverr_timer.data = 0;
5456}
5457
5458static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5459{
5460 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005461 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005462
5463 dd_dev_info(dd, "Receive Error: %s\n",
5464 rxe_err_status_string(buf, sizeof(buf), reg));
5465
5466 if (reg & ALL_RXE_FREEZE_ERR) {
5467 int flags = 0;
5468
5469 /*
5470 * Freeze mode recovery is disabled for the errors
5471 * in RXE_FREEZE_ABORT_MASK
5472 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005473 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005474 flags = FREEZE_ABORT;
5475
5476 start_freeze_handling(dd->pport, flags);
5477 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005478
5479 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5480 if (reg & (1ull << i))
5481 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5482 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005483}
5484
5485static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5486{
5487 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005488 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005489
5490 dd_dev_info(dd, "Misc Error: %s",
5491 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005492 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5493 if (reg & (1ull << i))
5494 incr_cntr64(&dd->misc_err_status_cnt[i]);
5495 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005496}
5497
5498static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5499{
5500 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005501 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005502
5503 dd_dev_info(dd, "PIO Error: %s\n",
5504 pio_err_status_string(buf, sizeof(buf), reg));
5505
5506 if (reg & ALL_PIO_FREEZE_ERR)
5507 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005508
5509 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5510 if (reg & (1ull << i))
5511 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5512 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005513}
5514
5515static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5516{
5517 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005518 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005519
5520 dd_dev_info(dd, "SDMA Error: %s\n",
5521 sdma_err_status_string(buf, sizeof(buf), reg));
5522
5523 if (reg & ALL_SDMA_FREEZE_ERR)
5524 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005525
5526 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5527 if (reg & (1ull << i))
5528 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5529 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530}
5531
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005532static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5533{
5534 incr_cntr64(&ppd->port_xmit_discards);
5535}
5536
Mike Marciniszyn77241052015-07-30 15:17:43 -04005537static void count_port_inactive(struct hfi1_devdata *dd)
5538{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005539 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005540}
5541
5542/*
5543 * We have had a "disallowed packet" error during egress. Determine the
 5544 * integrity check that failed, and update the relevant error counter, etc.
 5545 *
 5546 * Note that the SEND_EGRESS_ERR_INFO register has only a single bit of
 5547 * state per integrity check, so if more than one packet fails the same
 5548 * integrity check we can miss the reason for an egress error, because
 5549 * the corresponding bit in SEND_EGRESS_ERR_INFO has already been cleared.
5550 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005551static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5552 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005553{
5554 struct hfi1_pportdata *ppd = dd->pport;
5555 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5556 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5557 char buf[96];
5558
5559 /* clear down all observed info as quickly as possible after read */
5560 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5561
5562 dd_dev_info(dd,
5563 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5564 info, egress_err_info_string(buf, sizeof(buf), info), src);
5565
5566 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005567 if (info & PORT_DISCARD_EGRESS_ERRS) {
5568 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005569
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005570 /*
5571 * Count all, in case multiple bits are set. Reminder:
5572 * since there is only one info register for many sources,
5573 * these may be attributed to the wrong VL if they occur
5574 * too close together.
5575 */
5576 weight = hweight64(info);
5577 for (i = 0; i < weight; i++) {
5578 __count_port_discards(ppd);
5579 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5580 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5581 else if (vl == 15)
5582 incr_cntr64(&ppd->port_xmit_discards_vl
5583 [C_VL_15]);
5584 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005585 }
5586}
5587
5588/*
5589 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5590 * register. Does it represent a 'port inactive' error?
5591 */
5592static inline int port_inactive_err(u64 posn)
5593{
5594 return (posn >= SEES(TX_LINKDOWN) &&
5595 posn <= SEES(TX_INCORRECT_LINK_STATE));
5596}
5597
5598/*
5599 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5600 * register. Does it represent a 'disallowed packet' error?
5601 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005602static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005603{
5604 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5605 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5606}
5607
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005608/*
5609 * Input value is a bit position of one of the SDMA engine disallowed
5610 * packet errors. Return which engine. Use of this must be guarded by
5611 * disallowed_pkt_err().
5612 */
5613static inline int disallowed_pkt_engine(int posn)
5614{
5615 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5616}
5617
5618/*
 5619 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5620 * be done.
5621 */
5622static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5623{
5624 struct sdma_vl_map *m;
5625 int vl;
5626
5627 /* range check */
5628 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5629 return -1;
5630
5631 rcu_read_lock();
5632 m = rcu_dereference(dd->sdma_map);
5633 vl = m->engine_to_vl[engine];
5634 rcu_read_unlock();
5635
5636 return vl;
5637}
5638
5639/*
 5640 * Translate the send context (software index) into a VL. Return -1 if the
5641 * translation cannot be done.
5642 */
5643static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5644{
5645 struct send_context_info *sci;
5646 struct send_context *sc;
5647 int i;
5648
5649 sci = &dd->send_contexts[sw_index];
5650
5651 /* there is no information for user (PSM) and ack contexts */
5652 if (sci->type != SC_KERNEL)
5653 return -1;
5654
5655 sc = sci->sc;
5656 if (!sc)
5657 return -1;
5658 if (dd->vld[15].sc == sc)
5659 return 15;
5660 for (i = 0; i < num_vls; i++)
5661 if (dd->vld[i].sc == sc)
5662 return i;
5663
5664 return -1;
5665}
5666
Mike Marciniszyn77241052015-07-30 15:17:43 -04005667static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5668{
5669 u64 reg_copy = reg, handled = 0;
5670 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005671 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005672
5673 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5674 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005675 else if (is_ax(dd) &&
5676 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5677 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005678 start_freeze_handling(dd->pport, 0);
5679
5680 while (reg_copy) {
5681 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005682 /* fls64() returns a 1-based offset; we want it zero-based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005683 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005684 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005685
5686 if (port_inactive_err(shift)) {
5687 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005688 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005689 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005690 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5691
5692 handle_send_egress_err_info(dd, vl);
5693 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005694 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005695 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005696 }
5697
5698 reg &= ~handled;
5699
5700 if (reg)
5701 dd_dev_info(dd, "Egress Error: %s\n",
5702 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005703
5704 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5705 if (reg & (1ull << i))
5706 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5707 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005708}
5709
5710static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5711{
5712 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005713 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714
5715 dd_dev_info(dd, "Send Error: %s\n",
5716 send_err_status_string(buf, sizeof(buf), reg));
5717
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005718 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5719 if (reg & (1ull << i))
5720 incr_cntr64(&dd->send_err_status_cnt[i]);
5721 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005722}
5723
5724/*
5725 * The maximum number of times the error clear down will loop before
5726 * blocking a repeating error. This value is arbitrary.
5727 */
5728#define MAX_CLEAR_COUNT 20
5729
5730/*
5731 * Clear and handle an error register. All error interrupts are funneled
5732 * through here to have a central location to correctly handle single-
5733 * or multi-shot errors.
5734 *
5735 * For non per-context registers, call this routine with a context value
5736 * of 0 so the per-context offset is zero.
5737 *
5738 * If the handler loops too many times, assume that something is wrong
5739 * and can't be fixed, so mask the error bits.
5740 */
5741static void interrupt_clear_down(struct hfi1_devdata *dd,
5742 u32 context,
5743 const struct err_reg_info *eri)
5744{
5745 u64 reg;
5746 u32 count;
5747
5748 /* read in a loop until no more errors are seen */
5749 count = 0;
5750 while (1) {
5751 reg = read_kctxt_csr(dd, context, eri->status);
5752 if (reg == 0)
5753 break;
5754 write_kctxt_csr(dd, context, eri->clear, reg);
5755 if (likely(eri->handler))
5756 eri->handler(dd, context, reg);
5757 count++;
5758 if (count > MAX_CLEAR_COUNT) {
5759 u64 mask;
5760
5761 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5762 eri->desc, reg);
5763 /*
5764 * Read-modify-write so any other masked bits
5765 * remain masked.
5766 */
5767 mask = read_kctxt_csr(dd, context, eri->mask);
5768 mask &= ~reg;
5769 write_kctxt_csr(dd, context, eri->mask, mask);
5770 break;
5771 }
5772 }
5773}
5774
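/*
 * Illustrative sketch of an err_reg_info entry as consumed by
 * interrupt_clear_down().  Only the members referenced above are shown;
 * the CSR names and the handler are placeholders, not real registers.
 *
 *	static const struct err_reg_info demo_eri = {
 *		.status  = DEMO_ERR_STATUS,	// read in the clear-down loop
 *		.clear   = DEMO_ERR_CLEAR,	// the observed status is written back here
 *		.mask    = DEMO_ERR_MASK,	// interrupt enable mask, used to block repeats
 *		.desc    = "DemoErr",
 *		.handler = handle_demo_err,	// hypothetical second-tier handler
 *	};
 *
 * interrupt_clear_down(dd, 0, &demo_eri) then reads status, clears and
 * handles it, and after MAX_CLEAR_COUNT repeats masks off the stuck bits.
 */
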
5775/*
5776 * CCE block "misc" interrupt. Source is < 16.
5777 */
5778static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5779{
5780 const struct err_reg_info *eri = &misc_errs[source];
5781
5782 if (eri->handler) {
5783 interrupt_clear_down(dd, 0, eri);
5784 } else {
5785 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5786 source);
5787 }
5788}
5789
5790static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5791{
5792 return flag_string(buf, buf_len, flags,
5793 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5794}
5795
5796/*
5797 * Send context error interrupt. Source (hw_context) is < 160.
5798 *
5799 * All send context errors cause the send context to halt. The normal
5800 * clear-down mechanism cannot be used because we cannot clear the
5801 * error bits until several other long-running items are done first.
5802 * This is OK because with the context halted, nothing else is going
5803 * to happen on it anyway.
5804 */
5805static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5806 unsigned int hw_context)
5807{
5808 struct send_context_info *sci;
5809 struct send_context *sc;
5810 char flags[96];
5811 u64 status;
5812 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005813 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005814
5815 sw_index = dd->hw_to_sw[hw_context];
5816 if (sw_index >= dd->num_send_contexts) {
5817 dd_dev_err(dd,
5818 "out of range sw index %u for send context %u\n",
5819 sw_index, hw_context);
5820 return;
5821 }
5822 sci = &dd->send_contexts[sw_index];
5823 sc = sci->sc;
5824 if (!sc) {
5825 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5826 sw_index, hw_context);
5827 return;
5828 }
5829
5830 /* tell the software that a halt has begun */
5831 sc_stop(sc, SCF_HALTED);
5832
5833 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5834
5835 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5836 send_context_err_status_string(flags, sizeof(flags), status));
5837
5838 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005839 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005840
5841 /*
5842 * Automatically restart halted kernel contexts out of interrupt
5843 * context. User contexts must ask the driver to restart the context.
5844 */
5845 if (sc->type != SC_USER)
5846 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005847
5848 /*
5849 * Update the counters for the corresponding status bits.
5850 * Note that these particular counters are aggregated over all
5851 * 160 contexts.
5852 */
5853 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5854 if (status & (1ull << i))
5855 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5856 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005857}
5858
5859static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5860 unsigned int source, u64 status)
5861{
5862 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005863 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005864
5865 sde = &dd->per_sdma[source];
5866#ifdef CONFIG_SDMA_VERBOSITY
5867 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5868 slashstrip(__FILE__), __LINE__, __func__);
5869 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5870 sde->this_idx, source, (unsigned long long)status);
5871#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005872 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005873 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005874
5875 /*
5876 * Update the counters for the corresponding status bits.
5877 * Note that these particular counters are aggregated over
5878 * all 16 DMA engines.
5879 */
5880 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5881 if (status & (1ull << i))
5882 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5883 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005884}
5885
5886/*
5887 * CCE block SDMA error interrupt. Source is < 16.
5888 */
5889static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5890{
5891#ifdef CONFIG_SDMA_VERBOSITY
5892 struct sdma_engine *sde = &dd->per_sdma[source];
5893
5894 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5895 slashstrip(__FILE__), __LINE__, __func__);
5896 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5897 source);
5898 sdma_dumpstate(sde);
5899#endif
5900 interrupt_clear_down(dd, source, &sdma_eng_err);
5901}
5902
5903/*
5904 * CCE block "various" interrupt. Source is < 8.
5905 */
5906static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5907{
5908 const struct err_reg_info *eri = &various_err[source];
5909
5910 /*
5911 * TCritInt cannot go through interrupt_clear_down()
5912 * because it is not a second tier interrupt. The handler
5913 * should be called directly.
5914 */
5915 if (source == TCRIT_INT_SOURCE)
5916 handle_temp_err(dd);
5917 else if (eri->handler)
5918 interrupt_clear_down(dd, 0, eri);
5919 else
5920 dd_dev_info(dd,
5921 "%s: Unimplemented/reserved interrupt %d\n",
5922 __func__, source);
5923}
5924
5925static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5926{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005927 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005928 struct hfi1_pportdata *ppd = dd->pport;
5929 unsigned long flags;
5930 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5931
5932 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005933 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5934 __func__);
5935
5936 if (!qsfp_mod_present(ppd)) {
5937 ppd->driver_link_ready = 0;
5938 /*
5939 * Cable removed, reset all our information about the
5940 * cache and cable capabilities
5941 */
5942
5943 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5944 /*
5945 * We don't set cache_refresh_required here as we expect
5946 * an interrupt when a cable is inserted
5947 */
5948 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005949 ppd->qsfp_info.reset_needed = 0;
5950 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005951 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5952 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005953 /* Invert the ModPresent pin now to detect plug-in */
5954 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5955 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005956
5957 if ((ppd->offline_disabled_reason >
5958 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005959 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08005960 (ppd->offline_disabled_reason ==
5961 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5962 ppd->offline_disabled_reason =
5963 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005964 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005965
Mike Marciniszyn77241052015-07-30 15:17:43 -04005966 if (ppd->host_link_state == HLS_DN_POLL) {
5967 /*
5968 * The link is still in POLL. This means
5969 * that the normal link down processing
5970 * will not happen. We have to do it here
5971 * before turning the DC off.
5972 */
5973 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5974 }
5975 } else {
5976 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5977 ppd->qsfp_info.cache_valid = 0;
5978 ppd->qsfp_info.cache_refresh_required = 1;
5979 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5980 flags);
5981
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005982 /*
5983 * Stop inversion of ModPresent pin to detect
5984 * removal of the cable
5985 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005986 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005987 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5988 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5989
5990 ppd->offline_disabled_reason =
5991 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005992 }
5993 }
5994
5995 if (reg & QSFP_HFI0_INT_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005996 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5997 __func__);
5998 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5999 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006000 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6001 }
6002
6003 /* Schedule the QSFP work only if there is a cable attached. */
6004 if (qsfp_mod_present(ppd))
6005 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6006}
6007
6008static int request_host_lcb_access(struct hfi1_devdata *dd)
6009{
6010 int ret;
6011
6012 ret = do_8051_command(dd, HCMD_MISC,
6013 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6014 NULL);
6015 if (ret != HCMD_SUCCESS) {
6016 dd_dev_err(dd, "%s: command failed with error %d\n",
6017 __func__, ret);
6018 }
6019 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6020}
6021
6022static int request_8051_lcb_access(struct hfi1_devdata *dd)
6023{
6024 int ret;
6025
6026 ret = do_8051_command(dd, HCMD_MISC,
6027 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6028 NULL);
6029 if (ret != HCMD_SUCCESS) {
6030 dd_dev_err(dd, "%s: command failed with error %d\n",
6031 __func__, ret);
6032 }
6033 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6034}
6035
6036/*
6037 * Set the LCB selector - allow host access. The DCC selector always
6038 * points to the host.
6039 */
6040static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6041{
6042 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6043 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
6044 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6045}
6046
6047/*
6048 * Clear the LCB selector - allow 8051 access. The DCC selector always
6049 * points to the host.
6050 */
6051static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6052{
6053 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6054 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6055}
6056
6057/*
6058 * Acquire LCB access from the 8051. If the host already has access,
6059 * just increment a counter. Otherwise, inform the 8051 that the
6060 * host is taking access.
6061 *
6062 * Returns:
6063 * 0 on success
6064 * -EBUSY if the 8051 has control and cannot be disturbed
6065 * -errno if unable to acquire access from the 8051
6066 */
6067int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6068{
6069 struct hfi1_pportdata *ppd = dd->pport;
6070 int ret = 0;
6071
6072 /*
6073 * Use the host link state lock so the operation of this routine
6074 * { link state check, selector change, count increment } can occur
6075 * as a unit against a link state change. Otherwise there is a
6076 * race between the state change and the count increment.
6077 */
6078 if (sleep_ok) {
6079 mutex_lock(&ppd->hls_lock);
6080 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006081 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006082 udelay(1);
6083 }
6084
6085 /* this access is valid only when the link is up */
6086 if ((ppd->host_link_state & HLS_UP) == 0) {
6087 dd_dev_info(dd, "%s: link state %s not up\n",
6088 __func__, link_state_name(ppd->host_link_state));
6089 ret = -EBUSY;
6090 goto done;
6091 }
6092
6093 if (dd->lcb_access_count == 0) {
6094 ret = request_host_lcb_access(dd);
6095 if (ret) {
6096 dd_dev_err(dd,
6097 "%s: unable to acquire LCB access, err %d\n",
6098 __func__, ret);
6099 goto done;
6100 }
6101 set_host_lcb_access(dd);
6102 }
6103 dd->lcb_access_count++;
6104done:
6105 mutex_unlock(&ppd->hls_lock);
6106 return ret;
6107}
6108
6109/*
6110 * Release LCB access by decrementing the use count. If the count is moving
 6111 * from 1 to 0, inform the 8051 that it has control back.
6112 *
6113 * Returns:
6114 * 0 on success
6115 * -errno if unable to release access to the 8051
6116 */
6117int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6118{
6119 int ret = 0;
6120
6121 /*
6122 * Use the host link state lock because the acquire needed it.
6123 * Here, we only need to keep { selector change, count decrement }
6124 * as a unit.
6125 */
6126 if (sleep_ok) {
6127 mutex_lock(&dd->pport->hls_lock);
6128 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006129 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006130 udelay(1);
6131 }
6132
6133 if (dd->lcb_access_count == 0) {
6134 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6135 __func__);
6136 goto done;
6137 }
6138
6139 if (dd->lcb_access_count == 1) {
6140 set_8051_lcb_access(dd);
6141 ret = request_8051_lcb_access(dd);
6142 if (ret) {
6143 dd_dev_err(dd,
6144 "%s: unable to release LCB access, err %d\n",
6145 __func__, ret);
6146 /* restore host access if the grant didn't work */
6147 set_host_lcb_access(dd);
6148 goto done;
6149 }
6150 }
6151 dd->lcb_access_count--;
6152done:
6153 mutex_unlock(&dd->pport->hls_lock);
6154 return ret;
6155}
6156
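/*
 * Typical usage sketch; illustrative only.  Host access to LCB CSRs is
 * bracketed by the acquire/release pair above.  DC_LCB_SOME_CSR is a
 * placeholder, not a real register name.
 *
 *	if (!acquire_lcb_access(dd, 1)) {	// sleep_ok: may block on the mutex
 *		u64 val = read_csr(dd, DC_LCB_SOME_CSR);
 *		...
 *		release_lcb_access(dd, 1);
 *	}
 */
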
6157/*
6158 * Initialize LCB access variables and state. Called during driver load,
6159 * after most of the initialization is finished.
6160 *
6161 * The DC default is LCB access on for the host. The driver defaults to
6162 * leaving access to the 8051. Assign access now - this constrains the call
6163 * to this routine to be after all LCB set-up is done. In particular, after
 6164 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6165 */
6166static void init_lcb_access(struct hfi1_devdata *dd)
6167{
6168 dd->lcb_access_count = 0;
6169}
6170
6171/*
6172 * Write a response back to a 8051 request.
6173 */
6174static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6175{
6176 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6177 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6178 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6179 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6180}
6181
6182/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006183 * Handle host requests from the 8051.
6184 *
6185 * This is a work-queue function outside of the interrupt.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006186 */
Easwar Hariharancbac3862016-02-03 14:31:31 -08006187void handle_8051_request(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006188{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006189 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6190 dc_host_req_work);
6191 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006192 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006193 u16 data = 0;
6194 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6195 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
Mike Marciniszyn77241052015-07-30 15:17:43 -04006196
6197 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6198 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6199 return; /* no request */
6200
6201 /* zero out COMPLETED so the response is seen */
6202 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6203
6204 /* extract request details */
6205 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6206 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6207 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6208 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6209
6210 switch (type) {
6211 case HREQ_LOAD_CONFIG:
6212 case HREQ_SAVE_CONFIG:
6213 case HREQ_READ_CONFIG:
6214 case HREQ_SET_TX_EQ_ABS:
6215 case HREQ_SET_TX_EQ_REL:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006216 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6217 type);
6218 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6219 break;
6220
Easwar Hariharancbac3862016-02-03 14:31:31 -08006221 case HREQ_ENABLE:
6222 lanes = data & 0xF;
6223 for (i = 0; lanes; lanes >>= 1, i++) {
6224 if (!(lanes & 1))
6225 continue;
6226 if (data & 0x200) {
6227 /* enable TX CDR */
6228 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6229 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6230 cdr_ctrl_byte |= (1 << (i + 4));
6231 } else {
6232 /* disable TX CDR */
6233 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6234 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6235 cdr_ctrl_byte &= ~(1 << (i + 4));
6236 }
6237
6238 if (data & 0x800) {
6239 /* enable RX CDR */
6240 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6241 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6242 cdr_ctrl_byte |= (1 << i);
6243 } else {
6244 /* disable RX CDR */
6245 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6246 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6247 cdr_ctrl_byte &= ~(1 << i);
6248 }
6249 }
6250 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6251 &cdr_ctrl_byte, 1);
6252 hreq_response(dd, HREQ_SUCCESS, data);
6253 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6254 break;
6255
Mike Marciniszyn77241052015-07-30 15:17:43 -04006256 case HREQ_CONFIG_DONE:
6257 hreq_response(dd, HREQ_SUCCESS, 0);
6258 break;
6259
6260 case HREQ_INTERFACE_TEST:
6261 hreq_response(dd, HREQ_SUCCESS, data);
6262 break;
6263
6264 default:
6265 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6266 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6267 break;
6268 }
6269}
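
/*
 * Illustrative decode of the HREQ_ENABLE request data word handled above,
 * derived from the bit tests in handle_8051_request() and shown only as a
 * reading aid:
 *
 *	data[3:0]          - bit mask of lanes the request applies to
 *	data bit 9 (0x200) - 1 = enable TX CDR, 0 = disable TX CDR
 *	data bit 11 (0x800) - 1 = enable RX CDR, 0 = disable RX CDR
 *
 * For example, data = 0xa0f asks for TX and RX CDR to be enabled on all
 * four lanes, provided the cached QSFP info reports CDR support.
 */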
6270
6271static void write_global_credit(struct hfi1_devdata *dd,
6272 u8 vau, u16 total, u16 shared)
6273{
6274 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6275 ((u64)total
6276 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6277 | ((u64)shared
6278 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6279 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6280}
6281
6282/*
6283 * Set up initial VL15 credits of the remote. Assumes the rest of
6284 * the CM credit registers are zero from a previous global or credit reset .
6285 */
6286void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6287{
6288 /* leave shared count at zero for both global and VL15 */
6289 write_global_credit(dd, vau, vl15buf, 0);
6290
6291 /* We may need some credits for another VL when sending packets
6292 * with the snoop interface. Dividing it down the middle for VL15
6293 * and VL0 should suffice.
6294 */
6295 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6296 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6297 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6298 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6299 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6300 } else {
6301 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6302 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6303 }
6304}
6305
6306/*
6307 * Zero all credit details from the previous connection and
6308 * reset the CM manager's internal counters.
6309 */
6310void reset_link_credits(struct hfi1_devdata *dd)
6311{
6312 int i;
6313
6314 /* remove all previous VL credit limits */
6315 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006316 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006317 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6318 write_global_credit(dd, 0, 0, 0);
6319 /* reset the CM block */
6320 pio_send_control(dd, PSC_CM_RESET);
6321}
6322
6323/* convert a vCU to a CU */
6324static u32 vcu_to_cu(u8 vcu)
6325{
6326 return 1 << vcu;
6327}
6328
6329/* convert a CU to a vCU */
6330static u8 cu_to_vcu(u32 cu)
6331{
6332 return ilog2(cu);
6333}
6334
6335/* convert a vAU to an AU */
6336static u32 vau_to_au(u8 vau)
6337{
6338 return 8 * (1 << vau);
6339}
6340
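/*
 * Worked examples for the conversions above (illustrative only):
 *
 *	vcu_to_cu(): vCU 0 -> 1 credit unit, vCU 1 -> 2, vCU 3 -> 8
 *	cu_to_vcu(): the inverse, e.g. CU 8 -> vCU 3
 *	vau_to_au(): vAU 0 -> 8 bytes, vAU 1 -> 16 bytes, vAU 3 -> 64 bytes
 *
 * i.e. the "virtual" encodings exchanged during LNI are log2 values, with
 * the AU additionally scaled by 8 bytes.
 */
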
6341static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6342{
6343 ppd->sm_trap_qp = 0x0;
6344 ppd->sa_qp = 0x1;
6345}
6346
6347/*
6348 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6349 */
6350static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6351{
6352 u64 reg;
6353
6354 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6355 write_csr(dd, DC_LCB_CFG_RUN, 0);
6356 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6357 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6358 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6359 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6360 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6361 reg = read_csr(dd, DCC_CFG_RESET);
6362 write_csr(dd, DCC_CFG_RESET,
6363 reg
6364 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6365 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006366 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006367 if (!abort) {
6368 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6369 write_csr(dd, DCC_CFG_RESET, reg);
6370 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6371 }
6372}
6373
6374/*
6375 * This routine should be called after the link has been transitioned to
6376 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6377 * reset).
6378 *
6379 * The expectation is that the caller of this routine would have taken
6380 * care of properly transitioning the link into the correct state.
6381 */
6382static void dc_shutdown(struct hfi1_devdata *dd)
6383{
6384 unsigned long flags;
6385
6386 spin_lock_irqsave(&dd->dc8051_lock, flags);
6387 if (dd->dc_shutdown) {
6388 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6389 return;
6390 }
6391 dd->dc_shutdown = 1;
6392 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6393 /* Shutdown the LCB */
6394 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006395 /*
6396	 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006397	 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006398	 * itself.
6399 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006400 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6401}
6402
Jubin John4d114fd2016-02-14 20:21:43 -08006403/*
6404 * Calling this after the DC has been brought out of reset should not
6405 * do any damage.
6406 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006407static void dc_start(struct hfi1_devdata *dd)
6408{
6409 unsigned long flags;
6410 int ret;
6411
6412 spin_lock_irqsave(&dd->dc8051_lock, flags);
6413 if (!dd->dc_shutdown)
6414 goto done;
6415 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6416 /* Take the 8051 out of reset */
6417 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6418 /* Wait until 8051 is ready */
6419 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6420 if (ret) {
6421 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6422 __func__);
6423 }
6424 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6425 write_csr(dd, DCC_CFG_RESET, 0x10);
6426 /* lcb_shutdown() with abort=1 does not restore these */
6427 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6428 spin_lock_irqsave(&dd->dc8051_lock, flags);
6429 dd->dc_shutdown = 0;
6430done:
6431 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6432}
6433
6434/*
6435 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6436 */
6437static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6438{
6439 u64 rx_radr, tx_radr;
6440 u32 version;
6441
6442 if (dd->icode != ICODE_FPGA_EMULATION)
6443 return;
6444
6445 /*
6446 * These LCB defaults on emulator _s are good, nothing to do here:
6447 * LCB_CFG_TX_FIFOS_RADR
6448 * LCB_CFG_RX_FIFOS_RADR
6449 * LCB_CFG_LN_DCLK
6450 * LCB_CFG_IGNORE_LOST_RCLK
6451 */
6452 if (is_emulator_s(dd))
6453 return;
6454 /* else this is _p */
6455
6456 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006457 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006458 version = 0x2d; /* all B0 use 0x2d or higher settings */
6459
6460 if (version <= 0x12) {
6461 /* release 0x12 and below */
6462
6463 /*
6464 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6465 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6466 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6467 */
6468 rx_radr =
6469 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6470 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6472 /*
6473 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6474 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6475 */
6476 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6477 } else if (version <= 0x18) {
6478 /* release 0x13 up to 0x18 */
6479 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6480 rx_radr =
6481 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6482 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6483 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6484 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6485 } else if (version == 0x19) {
6486 /* release 0x19 */
6487 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6488 rx_radr =
6489 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6490 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6491 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6492 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6493 } else if (version == 0x1a) {
6494 /* release 0x1a */
6495 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6496 rx_radr =
6497 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6498 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6499 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6500 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6501 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6502 } else {
6503 /* release 0x1b and higher */
6504 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6505 rx_radr =
6506 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6507 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6508 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6509 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6510 }
6511
6512 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6513 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6514 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6515 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6516 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6517}
6518
6519/*
6520 * Handle a SMA idle message
6521 *
6522 * This is a work-queue function outside of the interrupt.
6523 */
6524void handle_sma_message(struct work_struct *work)
6525{
6526 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6527 sma_message_work);
6528 struct hfi1_devdata *dd = ppd->dd;
6529 u64 msg;
6530 int ret;
6531
Jubin John4d114fd2016-02-14 20:21:43 -08006532 /*
6533 * msg is bytes 1-4 of the 40-bit idle message - the command code
6534 * is stripped off
6535 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006536 ret = read_idle_sma(dd, &msg);
6537 if (ret)
6538 return;
6539 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6540 /*
6541 * React to the SMA message. Byte[1] (0 for us) is the command.
6542 */
6543 switch (msg & 0xff) {
6544 case SMA_IDLE_ARM:
6545 /*
6546 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6547 * State Transitions
6548 *
6549 * Only expected in INIT or ARMED, discard otherwise.
6550 */
6551 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6552 ppd->neighbor_normal = 1;
6553 break;
6554 case SMA_IDLE_ACTIVE:
6555 /*
6556 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6557 * State Transitions
6558 *
6559 * Can activate the node. Discard otherwise.
6560 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006561 if (ppd->host_link_state == HLS_UP_ARMED &&
6562 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006563 ppd->neighbor_normal = 1;
6564 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6565 if (ret)
6566 dd_dev_err(
6567 dd,
6568 "%s: received Active SMA idle message, couldn't set link to Active\n",
6569 __func__);
6570 }
6571 break;
6572 default:
6573 dd_dev_err(dd,
6574 "%s: received unexpected SMA idle message 0x%llx\n",
6575 __func__, msg);
6576 break;
6577 }
6578}
6579
6580static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6581{
6582 u64 rcvctrl;
6583 unsigned long flags;
6584
6585 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6586 rcvctrl = read_csr(dd, RCV_CTRL);
6587 rcvctrl |= add;
6588 rcvctrl &= ~clear;
6589 write_csr(dd, RCV_CTRL, rcvctrl);
6590 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6591}
6592
6593static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6594{
6595 adjust_rcvctrl(dd, add, 0);
6596}
6597
6598static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6599{
6600 adjust_rcvctrl(dd, 0, clear);
6601}
6602
6603/*
6604 * Called from all interrupt handlers to start handling an SPC freeze.
6605 */
6606void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6607{
6608 struct hfi1_devdata *dd = ppd->dd;
6609 struct send_context *sc;
6610 int i;
6611
6612 if (flags & FREEZE_SELF)
6613 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6614
6615 /* enter frozen mode */
6616 dd->flags |= HFI1_FROZEN;
6617
6618 /* notify all SDMA engines that they are going into a freeze */
6619 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6620
6621 /* do halt pre-handling on all enabled send contexts */
6622 for (i = 0; i < dd->num_send_contexts; i++) {
6623 sc = dd->send_contexts[i].sc;
6624 if (sc && (sc->flags & SCF_ENABLED))
6625 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6626 }
6627
6628	/* Send contexts are frozen. Notify user space */
6629 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6630
6631 if (flags & FREEZE_ABORT) {
6632 dd_dev_err(dd,
6633 "Aborted freeze recovery. Please REBOOT system\n");
6634 return;
6635 }
6636 /* queue non-interrupt handler */
6637 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6638}
6639
6640/*
6641 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6642 * depending on the "freeze" parameter.
6643 *
6644 * No need to return an error if it times out, our only option
6645 * is to proceed anyway.
6646 */
6647static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6648{
6649 unsigned long timeout;
6650 u64 reg;
6651
6652 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6653 while (1) {
6654 reg = read_csr(dd, CCE_STATUS);
6655 if (freeze) {
6656 /* waiting until all indicators are set */
6657 if ((reg & ALL_FROZE) == ALL_FROZE)
6658 return; /* all done */
6659 } else {
6660 /* waiting until all indicators are clear */
6661 if ((reg & ALL_FROZE) == 0)
6662 return; /* all done */
6663 }
6664
6665 if (time_after(jiffies, timeout)) {
6666 dd_dev_err(dd,
6667 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6668 freeze ? "" : "un",
6669 reg & ALL_FROZE,
6670 freeze ? ALL_FROZE : 0ull);
6671 return;
6672 }
6673 usleep_range(80, 120);
6674 }
6675}
6676
6677/*
6678 * Do all freeze handling for the RXE block.
6679 */
6680static void rxe_freeze(struct hfi1_devdata *dd)
6681{
6682 int i;
6683
6684 /* disable port */
6685 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6686
6687 /* disable all receive contexts */
6688 for (i = 0; i < dd->num_rcv_contexts; i++)
6689 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6690}
6691
6692/*
6693 * Unfreeze handling for the RXE block - kernel contexts only.
6694 * This will also enable the port. User contexts will do unfreeze
6695 * handling on a per-context basis as they call into the driver.
6696 *
6697 */
6698static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6699{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006700 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006701 int i;
6702
6703 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006704 for (i = 0; i < dd->n_krcv_queues; i++) {
6705 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6706 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6707 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6708 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6709 hfi1_rcvctrl(dd, rcvmask, i);
6710 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006711
6712 /* enable port */
6713 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6714}
6715
6716/*
6717 * Non-interrupt SPC freeze handling.
6718 *
6719 * This is a work-queue function outside of the triggering interrupt.
6720 */
6721void handle_freeze(struct work_struct *work)
6722{
6723 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6724 freeze_work);
6725 struct hfi1_devdata *dd = ppd->dd;
6726
6727 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006728 wait_for_freeze_status(dd, 1);
6729
6730 /* SPC is now frozen */
6731
6732 /* do send PIO freeze steps */
6733 pio_freeze(dd);
6734
6735 /* do send DMA freeze steps */
6736 sdma_freeze(dd);
6737
6738 /* do send egress freeze steps - nothing to do */
6739
6740 /* do receive freeze steps */
6741 rxe_freeze(dd);
6742
6743 /*
6744 * Unfreeze the hardware - clear the freeze, wait for each
6745 * block's frozen bit to clear, then clear the frozen flag.
6746 */
6747 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6748 wait_for_freeze_status(dd, 0);
6749
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006750 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006751 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6752 wait_for_freeze_status(dd, 1);
6753 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6754 wait_for_freeze_status(dd, 0);
6755 }
6756
6757 /* do send PIO unfreeze steps for kernel contexts */
6758 pio_kernel_unfreeze(dd);
6759
6760 /* do send DMA unfreeze steps */
6761 sdma_unfreeze(dd);
6762
6763 /* do send egress unfreeze steps - nothing to do */
6764
6765 /* do receive unfreeze steps for kernel contexts */
6766 rxe_kernel_unfreeze(dd);
6767
6768 /*
6769 * The unfreeze procedure touches global device registers when
6770 * it disables and re-enables RXE. Mark the device unfrozen
6771 * after all that is done so other parts of the driver waiting
6772 * for the device to unfreeze don't do things out of order.
6773 *
6774 * The above implies that the meaning of HFI1_FROZEN flag is
6775 * "Device has gone into freeze mode and freeze mode handling
6776 * is still in progress."
6777 *
6778 * The flag will be removed when freeze mode processing has
6779 * completed.
6780 */
6781 dd->flags &= ~HFI1_FROZEN;
6782 wake_up(&dd->event_queue);
6783
6784 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006785}
6786
6787/*
6788 * Handle a link up interrupt from the 8051.
6789 *
6790 * This is a work-queue function outside of the interrupt.
6791 */
6792void handle_link_up(struct work_struct *work)
6793{
6794 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6795 link_up_work);
6796 set_link_state(ppd, HLS_UP_INIT);
6797
6798 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6799 read_ltp_rtt(ppd->dd);
6800 /*
6801 * OPA specifies that certain counters are cleared on a transition
6802 * to link up, so do that.
6803 */
6804 clear_linkup_counters(ppd->dd);
6805 /*
6806 * And (re)set link up default values.
6807 */
6808 set_linkup_defaults(ppd);
6809
6810 /* enforce link speed enabled */
6811 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6812 /* oops - current speed is not enabled, bounce */
6813 dd_dev_err(ppd->dd,
6814 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6815 ppd->link_speed_active, ppd->link_speed_enabled);
6816 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6817 OPA_LINKDOWN_REASON_SPEED_POLICY);
6818 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006819 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006820 start_link(ppd);
6821 }
6822}
6823
Jubin John4d114fd2016-02-14 20:21:43 -08006824/*
6825 * Several pieces of LNI information were cached for SMA in ppd.
6826 * Reset these on link down
6827 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006828static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6829{
6830 ppd->neighbor_guid = 0;
6831 ppd->neighbor_port_number = 0;
6832 ppd->neighbor_type = 0;
6833 ppd->neighbor_fm_security = 0;
6834}
6835
6836/*
6837 * Handle a link down interrupt from the 8051.
6838 *
6839 * This is a work-queue function outside of the interrupt.
6840 */
6841void handle_link_down(struct work_struct *work)
6842{
6843 u8 lcl_reason, neigh_reason = 0;
6844 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6845 link_down_work);
6846
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006847 if ((ppd->host_link_state &
6848 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6849 ppd->port_type == PORT_TYPE_FIXED)
6850 ppd->offline_disabled_reason =
6851 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6852
6853 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006854 set_link_state(ppd, HLS_DN_OFFLINE);
6855
6856 lcl_reason = 0;
6857 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6858
6859 /*
6860 * If no reason, assume peer-initiated but missed
6861 * LinkGoingDown idle flits.
6862 */
6863 if (neigh_reason == 0)
6864 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6865
6866 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6867
6868 reset_neighbor_info(ppd);
6869
6870 /* disable the port */
6871 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6872
Jubin John4d114fd2016-02-14 20:21:43 -08006873 /*
6874 * If there is no cable attached, turn the DC off. Otherwise,
6875 * start the link bring up.
6876 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006877 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006878 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006879 } else {
6880 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006881 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006882 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006883}
6884
6885void handle_link_bounce(struct work_struct *work)
6886{
6887 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6888 link_bounce_work);
6889
6890 /*
6891 * Only do something if the link is currently up.
6892 */
6893 if (ppd->host_link_state & HLS_UP) {
6894 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006895 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006896 start_link(ppd);
6897 } else {
6898 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6899 __func__, link_state_name(ppd->host_link_state));
6900 }
6901}
6902
6903/*
6904 * Mask conversion: Capability exchange to Port LTP. The capability
6905 * exchange has an implicit 16b CRC that is mandatory.
6906 */
6907static int cap_to_port_ltp(int cap)
6908{
6909 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6910
6911 if (cap & CAP_CRC_14B)
6912 port_ltp |= PORT_LTP_CRC_MODE_14;
6913 if (cap & CAP_CRC_48B)
6914 port_ltp |= PORT_LTP_CRC_MODE_48;
6915 if (cap & CAP_CRC_12B_16B_PER_LANE)
6916 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6917
6918 return port_ltp;
6919}
6920
6921/*
6922 * Convert an OPA Port LTP mask to capability mask
6923 */
6924int port_ltp_to_cap(int port_ltp)
6925{
6926 int cap_mask = 0;
6927
6928 if (port_ltp & PORT_LTP_CRC_MODE_14)
6929 cap_mask |= CAP_CRC_14B;
6930 if (port_ltp & PORT_LTP_CRC_MODE_48)
6931 cap_mask |= CAP_CRC_48B;
6932 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6933 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6934
6935 return cap_mask;
6936}
6937
6938/*
6939 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6940 */
6941static int lcb_to_port_ltp(int lcb_crc)
6942{
6943 int port_ltp = 0;
6944
6945 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6946 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6947 else if (lcb_crc == LCB_CRC_48B)
6948 port_ltp = PORT_LTP_CRC_MODE_48;
6949 else if (lcb_crc == LCB_CRC_14B)
6950 port_ltp = PORT_LTP_CRC_MODE_14;
6951 else
6952 port_ltp = PORT_LTP_CRC_MODE_16;
6953
6954 return port_ltp;
6955}
6956
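/*
 * Illustrative example of the conversions above (not part of the original
 * source): a capability mask of (CAP_CRC_14B | CAP_CRC_48B) maps through
 * cap_to_port_ltp() to (PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 |
 * PORT_LTP_CRC_MODE_48), since the 16b mode is always included; the
 * mandatory 16b mode has no capability bit and is simply dropped by
 * port_ltp_to_cap() when converting in the other direction.
 */
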
6957/*
6958 * Our neighbor has indicated that we are allowed to act as a fabric
6959 * manager, so place the full management partition key in the second
6960 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6961 * that we should already have the limited management partition key in
6962 * array element 1, and also that the port is not yet up when
6963 * add_full_mgmt_pkey() is invoked.
6964 */
6965static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6966{
6967 struct hfi1_devdata *dd = ppd->dd;
6968
Dean Luick87645222015-12-01 15:38:21 -05006969	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6970 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6971 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6972 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006973 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6974 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6975}
6976
6977/*
6978 * Convert the given link width to the OPA link width bitmask.
6979 */
6980static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6981{
6982 switch (width) {
6983 case 0:
6984 /*
6985 * Simulator and quick linkup do not set the width.
6986 * Just set it to 4x without complaint.
6987 */
6988 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6989 return OPA_LINK_WIDTH_4X;
6990 return 0; /* no lanes up */
6991 case 1: return OPA_LINK_WIDTH_1X;
6992 case 2: return OPA_LINK_WIDTH_2X;
6993 case 3: return OPA_LINK_WIDTH_3X;
6994 default:
6995 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6996 __func__, width);
6997 /* fall through */
6998 case 4: return OPA_LINK_WIDTH_4X;
6999 }
7000}
7001
7002/*
7003 * Do a population count on the bottom nibble.
7004 */
7005static const u8 bit_counts[16] = {
7006 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7007};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007008
Mike Marciniszyn77241052015-07-30 15:17:43 -04007009static inline u8 nibble_to_count(u8 nibble)
7010{
7011 return bit_counts[nibble & 0xf];
7012}
7013
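/*
 * Example (illustrative): an enable_lane mask of 0xb (binary 1011) has
 * three bits set, so nibble_to_count(0xb) returns 3 active lanes; only the
 * low nibble is examined.
 */
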
7014/*
7015 * Read the active lane information from the 8051 registers and return
7016 * their widths.
7017 *
7018 * Active lane information is found in these 8051 registers:
7019 * enable_lane_tx
7020 * enable_lane_rx
7021 */
7022static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7023 u16 *rx_width)
7024{
7025 u16 tx, rx;
7026 u8 enable_lane_rx;
7027 u8 enable_lane_tx;
7028 u8 tx_polarity_inversion;
7029 u8 rx_polarity_inversion;
7030 u8 max_rate;
7031
7032 /* read the active lanes */
7033 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7034 &rx_polarity_inversion, &max_rate);
7035 read_local_lni(dd, &enable_lane_rx);
7036
7037 /* convert to counts */
7038 tx = nibble_to_count(enable_lane_tx);
7039 rx = nibble_to_count(enable_lane_rx);
7040
7041 /*
7042 * Set link_speed_active here, overriding what was set in
7043 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7044 * set the max_rate field in handle_verify_cap until v0.19.
7045 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007046 if ((dd->icode == ICODE_RTL_SILICON) &&
7047 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007048 /* max_rate: 0 = 12.5G, 1 = 25G */
7049 switch (max_rate) {
7050 case 0:
7051 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7052 break;
7053 default:
7054 dd_dev_err(dd,
7055 "%s: unexpected max rate %d, using 25Gb\n",
7056 __func__, (int)max_rate);
7057 /* fall through */
7058 case 1:
7059 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7060 break;
7061 }
7062 }
7063
7064 dd_dev_info(dd,
7065 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7066 enable_lane_tx, tx, enable_lane_rx, rx);
7067 *tx_width = link_width_to_bits(dd, tx);
7068 *rx_width = link_width_to_bits(dd, rx);
7069}
7070
7071/*
7072 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7073 * Valid after the end of VerifyCap and during LinkUp. Does not change
7074 * after link up. I.e. look elsewhere for downgrade information.
7075 *
7076 * Bits are:
7077 * + bits [7:4] contain the number of active transmitters
7078 * + bits [3:0] contain the number of active receivers
7079 * These are numbers 1 through 4 and can be different values if the
7080 * link is asymmetric.
7081 *
7082 * verify_cap_local_fm_link_width[0] retains its original value.
7083 */
7084static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7085 u16 *rx_width)
7086{
7087 u16 widths, tx, rx;
7088 u8 misc_bits, local_flags;
7089 u16 active_tx, active_rx;
7090
7091 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7092 tx = widths >> 12;
7093 rx = (widths >> 8) & 0xf;
7094
7095 *tx_width = link_width_to_bits(dd, tx);
7096 *rx_width = link_width_to_bits(dd, rx);
7097
7098 /* print the active widths */
7099 get_link_widths(dd, &active_tx, &active_rx);
7100}
7101
7102/*
7103 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7104 * hardware information when the link first comes up.
7105 *
7106 * The link width is not available until after VerifyCap.AllFramesReceived
7107 * (the trigger for handle_verify_cap), so this is outside that routine
7108 * and should be called when the 8051 signals linkup.
7109 */
7110void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7111{
7112 u16 tx_width, rx_width;
7113
7114 /* get end-of-LNI link widths */
7115 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7116
7117 /* use tx_width as the link is supposed to be symmetric on link up */
7118 ppd->link_width_active = tx_width;
7119 /* link width downgrade active (LWD.A) starts out matching LW.A */
7120 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7121 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7122 /* per OPA spec, on link up LWD.E resets to LWD.S */
7123 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7124 /* cache the active egress rate (units {10^6 bits/sec]) */
7125 ppd->current_egress_rate = active_egress_rate(ppd);
7126}
7127
7128/*
7129 * Handle a verify capabilities interrupt from the 8051.
7130 *
7131 * This is a work-queue function outside of the interrupt.
7132 */
7133void handle_verify_cap(struct work_struct *work)
7134{
7135 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7136 link_vc_work);
7137 struct hfi1_devdata *dd = ppd->dd;
7138 u64 reg;
7139 u8 power_management;
7140	u8 continuous;
7141 u8 vcu;
7142 u8 vau;
7143 u8 z;
7144 u16 vl15buf;
7145 u16 link_widths;
7146 u16 crc_mask;
7147 u16 crc_val;
7148 u16 device_id;
7149 u16 active_tx, active_rx;
7150 u8 partner_supported_crc;
7151 u8 remote_tx_rate;
7152 u8 device_rev;
7153
7154 set_link_state(ppd, HLS_VERIFY_CAP);
7155
7156 lcb_shutdown(dd, 0);
7157 adjust_lcb_for_fpga_serdes(dd);
7158
7159 /*
7160 * These are now valid:
7161 * remote VerifyCap fields in the general LNI config
7162 * CSR DC8051_STS_REMOTE_GUID
7163 * CSR DC8051_STS_REMOTE_NODE_TYPE
7164 * CSR DC8051_STS_REMOTE_FM_SECURITY
7165 * CSR DC8051_STS_REMOTE_PORT_NO
7166 */
7167
7168	read_vc_remote_phy(dd, &power_management, &continuous);
7169 read_vc_remote_fabric(
7170 dd,
7171 &vau,
7172 &z,
7173 &vcu,
7174 &vl15buf,
7175 &partner_supported_crc);
7176 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7177 read_remote_device_id(dd, &device_id, &device_rev);
7178 /*
7179 * And the 'MgmtAllowed' information, which is exchanged during
7180 * LNI, is also be available at this point.
7181 */
7182 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7183 /* print the active widths */
7184 get_link_widths(dd, &active_tx, &active_rx);
7185 dd_dev_info(dd,
7186 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7187		    (int)power_management, (int)continuous);
7188 dd_dev_info(dd,
7189 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7190 (int)vau,
7191 (int)z,
7192 (int)vcu,
7193 (int)vl15buf,
7194 (int)partner_supported_crc);
7195 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7196 (u32)remote_tx_rate, (u32)link_widths);
7197 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7198 (u32)device_id, (u32)device_rev);
7199 /*
7200 * The peer vAU value just read is the peer receiver value. HFI does
7201 * not support a transmit vAU of 0 (AU == 8). We advertised that
7202 * with Z=1 in the fabric capabilities sent to the peer. The peer
7203 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7204 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7205 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7206 * subject to the Z value exception.
7207 */
7208 if (vau == 0)
7209 vau = 1;
7210 set_up_vl15(dd, vau, vl15buf);
7211
7212 /* set up the LCB CRC mode */
7213 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7214
7215 /* order is important: use the lowest bit in common */
7216 if (crc_mask & CAP_CRC_14B)
7217 crc_val = LCB_CRC_14B;
7218 else if (crc_mask & CAP_CRC_48B)
7219 crc_val = LCB_CRC_48B;
7220 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7221 crc_val = LCB_CRC_12B_16B_PER_LANE;
7222 else
7223 crc_val = LCB_CRC_16B;
7224
7225 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7226 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7227 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7228
7229 /* set (14b only) or clear sideband credit */
7230 reg = read_csr(dd, SEND_CM_CTRL);
7231 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7232 write_csr(dd, SEND_CM_CTRL,
7233 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7234 } else {
7235 write_csr(dd, SEND_CM_CTRL,
7236 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7237 }
7238
7239 ppd->link_speed_active = 0; /* invalid value */
7240 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7241 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7242 switch (remote_tx_rate) {
7243 case 0:
7244 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7245 break;
7246 case 1:
7247 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7248 break;
7249 }
7250 } else {
7251 /* actual rate is highest bit of the ANDed rates */
7252 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7253
7254 if (rate & 2)
7255 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7256 else if (rate & 1)
7257 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7258 }
7259 if (ppd->link_speed_active == 0) {
7260 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7261 __func__, (int)remote_tx_rate);
7262 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7263 }
7264
7265 /*
7266 * Cache the values of the supported, enabled, and active
7267 * LTP CRC modes to return in 'portinfo' queries. But the bit
7268 * flags that are returned in the portinfo query differ from
7269 * what's in the link_crc_mask, crc_sizes, and crc_val
7270 * variables. Convert these here.
7271 */
7272 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7273 /* supported crc modes */
7274 ppd->port_ltp_crc_mode |=
7275 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7276 /* enabled crc modes */
7277 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7278 /* active crc mode */
7279
7280 /* set up the remote credit return table */
7281 assign_remote_cm_au_table(dd, vcu);
7282
7283 /*
7284 * The LCB is reset on entry to handle_verify_cap(), so this must
7285 * be applied on every link up.
7286 *
7287 * Adjust LCB error kill enable to kill the link if
7288 * these RBUF errors are seen:
7289 * REPLAY_BUF_MBE_SMASK
7290 * FLIT_INPUT_BUF_MBE_SMASK
7291 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007292 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007293 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7294 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7295 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7296 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7297 }
7298
7299 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7300 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7301
7302 /* give 8051 access to the LCB CSRs */
7303 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7304 set_8051_lcb_access(dd);
7305
7306 ppd->neighbor_guid =
7307 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7308 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7309 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7310 ppd->neighbor_type =
7311 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7312 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7313 ppd->neighbor_fm_security =
7314 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7315 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7316 dd_dev_info(dd,
7317 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7318 ppd->neighbor_guid, ppd->neighbor_type,
7319 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7320 if (ppd->mgmt_allowed)
7321 add_full_mgmt_pkey(ppd);
7322
7323 /* tell the 8051 to go to LinkUp */
7324 set_link_state(ppd, HLS_GOING_UP);
7325}
7326
7327/*
7328 * Apply the link width downgrade enabled policy against the current active
7329 * link widths.
7330 *
7331 * Called when the enabled policy changes or the active link widths change.
7332 */
7333void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7334{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007335 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007336 int tries;
7337 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007338 u16 tx, rx;
7339
Dean Luick323fd782015-11-16 21:59:24 -05007340 /* use the hls lock to avoid a race with actual link up */
7341 tries = 0;
7342retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007343 mutex_lock(&ppd->hls_lock);
7344 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007345 if (!(ppd->host_link_state & HLS_UP)) {
7346 /* still going up..wait and retry */
7347 if (ppd->host_link_state & HLS_GOING_UP) {
7348 if (++tries < 1000) {
7349 mutex_unlock(&ppd->hls_lock);
7350 usleep_range(100, 120); /* arbitrary */
7351 goto retry;
7352 }
7353 dd_dev_err(ppd->dd,
7354 "%s: giving up waiting for link state change\n",
7355 __func__);
7356 }
7357 goto done;
7358 }
7359
7360 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007361
7362 if (refresh_widths) {
7363 get_link_widths(ppd->dd, &tx, &rx);
7364 ppd->link_width_downgrade_tx_active = tx;
7365 ppd->link_width_downgrade_rx_active = rx;
7366 }
7367
7368 if (lwde == 0) {
7369 /* downgrade is disabled */
7370
7371 /* bounce if not at starting active width */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007372 if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) ||
7373 (ppd->link_width_active != ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007374 dd_dev_err(ppd->dd,
7375 "Link downgrade is disabled and link has downgraded, downing link\n");
7376 dd_dev_err(ppd->dd,
7377 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7378 ppd->link_width_active,
7379 ppd->link_width_downgrade_tx_active,
7380 ppd->link_width_downgrade_rx_active);
7381 do_bounce = 1;
7382 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007383 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7384 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007385 /* Tx or Rx is outside the enabled policy */
7386 dd_dev_err(ppd->dd,
7387 "Link is outside of downgrade allowed, downing link\n");
7388 dd_dev_err(ppd->dd,
7389 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7390 lwde,
7391 ppd->link_width_downgrade_tx_active,
7392 ppd->link_width_downgrade_rx_active);
7393 do_bounce = 1;
7394 }
7395
Dean Luick323fd782015-11-16 21:59:24 -05007396done:
7397 mutex_unlock(&ppd->hls_lock);
7398
Mike Marciniszyn77241052015-07-30 15:17:43 -04007399 if (do_bounce) {
7400 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7401 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7402 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007403 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007404 start_link(ppd);
7405 }
7406}
7407
7408/*
7409 * Handle a link downgrade interrupt from the 8051.
7410 *
7411 * This is a work-queue function outside of the interrupt.
7412 */
7413void handle_link_downgrade(struct work_struct *work)
7414{
7415 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7416 link_downgrade_work);
7417
7418 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7419 apply_link_downgrade_policy(ppd, 1);
7420}
7421
7422static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7423{
7424 return flag_string(buf, buf_len, flags, dcc_err_flags,
7425 ARRAY_SIZE(dcc_err_flags));
7426}
7427
7428static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7429{
7430 return flag_string(buf, buf_len, flags, lcb_err_flags,
7431 ARRAY_SIZE(lcb_err_flags));
7432}
7433
7434static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7435{
7436 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7437 ARRAY_SIZE(dc8051_err_flags));
7438}
7439
7440static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7441{
7442 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7443 ARRAY_SIZE(dc8051_info_err_flags));
7444}
7445
7446static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7447{
7448 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7449 ARRAY_SIZE(dc8051_info_host_msg_flags));
7450}
7451
7452static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7453{
7454 struct hfi1_pportdata *ppd = dd->pport;
7455 u64 info, err, host_msg;
7456 int queue_link_down = 0;
7457 char buf[96];
7458
7459 /* look at the flags */
7460 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7461 /* 8051 information set by firmware */
7462 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7463 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7464 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7465 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7466 host_msg = (info >>
7467 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7468 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7469
7470 /*
7471 * Handle error flags.
7472 */
7473 if (err & FAILED_LNI) {
7474 /*
7475 * LNI error indications are cleared by the 8051
7476 * only when starting polling. Only pay attention
7477 * to them when in the states that occur during
7478 * LNI.
7479 */
7480 if (ppd->host_link_state
7481 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7482 queue_link_down = 1;
7483 dd_dev_info(dd, "Link error: %s\n",
7484 dc8051_info_err_string(buf,
7485 sizeof(buf),
7486 err & FAILED_LNI));
7487 }
7488 err &= ~(u64)FAILED_LNI;
7489 }
Dean Luick6d014532015-12-01 15:38:23 -05007490		/* unknown frames can happen during LNI, just count */
7491 if (err & UNKNOWN_FRAME) {
7492 ppd->unknown_frame_count++;
7493 err &= ~(u64)UNKNOWN_FRAME;
7494 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007495 if (err) {
7496 /* report remaining errors, but do not do anything */
7497 dd_dev_err(dd, "8051 info error: %s\n",
7498 dc8051_info_err_string(buf, sizeof(buf), err));
7499 }
7500
7501 /*
7502 * Handle host message flags.
7503 */
7504 if (host_msg & HOST_REQ_DONE) {
7505 /*
7506 * Presently, the driver does a busy wait for
7507 * host requests to complete. This is only an
7508 * informational message.
7509 * NOTE: The 8051 clears the host message
7510 * information *on the next 8051 command*.
7511 * Therefore, when linkup is achieved,
7512 * this flag will still be set.
7513 */
7514 host_msg &= ~(u64)HOST_REQ_DONE;
7515 }
7516 if (host_msg & BC_SMA_MSG) {
7517 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7518 host_msg &= ~(u64)BC_SMA_MSG;
7519 }
7520 if (host_msg & LINKUP_ACHIEVED) {
7521 dd_dev_info(dd, "8051: Link up\n");
7522 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7523 host_msg &= ~(u64)LINKUP_ACHIEVED;
7524 }
7525 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007526 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007527 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7528 }
7529 if (host_msg & VERIFY_CAP_FRAME) {
7530 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7531 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7532 }
7533 if (host_msg & LINK_GOING_DOWN) {
7534 const char *extra = "";
7535 /* no downgrade action needed if going down */
7536 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7537 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7538 extra = " (ignoring downgrade)";
7539 }
7540 dd_dev_info(dd, "8051: Link down%s\n", extra);
7541 queue_link_down = 1;
7542 host_msg &= ~(u64)LINK_GOING_DOWN;
7543 }
7544 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7545 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7546 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7547 }
7548 if (host_msg) {
7549 /* report remaining messages, but do not do anything */
7550 dd_dev_info(dd, "8051 info host message: %s\n",
7551 dc8051_info_host_msg_string(buf, sizeof(buf),
7552 host_msg));
7553 }
7554
7555 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7556 }
7557 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7558 /*
7559 * Lost the 8051 heartbeat. If this happens, we
7560 * receive constant interrupts about it. Disable
7561 * the interrupt after the first.
7562 */
7563 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7564 write_csr(dd, DC_DC8051_ERR_EN,
7565 read_csr(dd, DC_DC8051_ERR_EN)
7566 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7567
7568 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7569 }
7570 if (reg) {
7571 /* report the error, but do not do anything */
7572 dd_dev_err(dd, "8051 error: %s\n",
7573 dc8051_err_string(buf, sizeof(buf), reg));
7574 }
7575
7576 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007577 /*
7578 * if the link is already going down or disabled, do not
7579 * queue another
7580 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007581 if ((ppd->host_link_state &
7582 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7583 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007584 dd_dev_info(dd, "%s: not queuing link down\n",
7585 __func__);
7586 } else {
7587 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7588 }
7589 }
7590}
7591
7592static const char * const fm_config_txt[] = {
7593[0] =
7594 "BadHeadDist: Distance violation between two head flits",
7595[1] =
7596 "BadTailDist: Distance violation between two tail flits",
7597[2] =
7598 "BadCtrlDist: Distance violation between two credit control flits",
7599[3] =
7600 "BadCrdAck: Credits return for unsupported VL",
7601[4] =
7602 "UnsupportedVLMarker: Received VL Marker",
7603[5] =
7604 "BadPreempt: Exceeded the preemption nesting level",
7605[6] =
7606 "BadControlFlit: Received unsupported control flit",
7607/* no 7 */
7608[8] =
7609 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7610};
7611
7612static const char * const port_rcv_txt[] = {
7613[1] =
7614 "BadPktLen: Illegal PktLen",
7615[2] =
7616 "PktLenTooLong: Packet longer than PktLen",
7617[3] =
7618 "PktLenTooShort: Packet shorter than PktLen",
7619[4] =
7620 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7621[5] =
7622 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7623[6] =
7624 "BadL2: Illegal L2 opcode",
7625[7] =
7626 "BadSC: Unsupported SC",
7627[9] =
7628 "BadRC: Illegal RC",
7629[11] =
7630 "PreemptError: Preempting with same VL",
7631[12] =
7632 "PreemptVL15: Preempting a VL15 packet",
7633};
7634
7635#define OPA_LDR_FMCONFIG_OFFSET 16
7636#define OPA_LDR_PORTRCV_OFFSET 0
7637static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7638{
7639 u64 info, hdr0, hdr1;
7640 const char *extra;
7641 char buf[96];
7642 struct hfi1_pportdata *ppd = dd->pport;
7643 u8 lcl_reason = 0;
7644 int do_bounce = 0;
7645
7646 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7647 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7648 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7649 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7650 /* set status bit */
7651 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7652 }
7653 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7654 }
7655
7656 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7657 struct hfi1_pportdata *ppd = dd->pport;
7658 /* this counter saturates at (2^32) - 1 */
7659 if (ppd->link_downed < (u32)UINT_MAX)
7660 ppd->link_downed++;
7661 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7662 }
7663
7664 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7665 u8 reason_valid = 1;
7666
7667 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7668 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7669 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7670 /* set status bit */
7671 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7672 }
7673 switch (info) {
7674 case 0:
7675 case 1:
7676 case 2:
7677 case 3:
7678 case 4:
7679 case 5:
7680 case 6:
7681 extra = fm_config_txt[info];
7682 break;
7683 case 8:
7684 extra = fm_config_txt[info];
7685 if (ppd->port_error_action &
7686 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7687 do_bounce = 1;
7688 /*
7689 * lcl_reason cannot be derived from info
7690 * for this error
7691 */
7692 lcl_reason =
7693 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7694 }
7695 break;
7696 default:
7697 reason_valid = 0;
7698 snprintf(buf, sizeof(buf), "reserved%lld", info);
7699 extra = buf;
7700 break;
7701 }
7702
7703 if (reason_valid && !do_bounce) {
7704 do_bounce = ppd->port_error_action &
7705 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7706 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7707 }
7708
7709 /* just report this */
7710 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7711 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7712 }
7713
7714 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7715 u8 reason_valid = 1;
7716
7717 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7718 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7719 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7720 if (!(dd->err_info_rcvport.status_and_code &
7721 OPA_EI_STATUS_SMASK)) {
7722 dd->err_info_rcvport.status_and_code =
7723 info & OPA_EI_CODE_SMASK;
7724 /* set status bit */
7725 dd->err_info_rcvport.status_and_code |=
7726 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007727 /*
7728 * save first 2 flits in the packet that caused
7729 * the error
7730 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007731 dd->err_info_rcvport.packet_flit1 = hdr0;
7732 dd->err_info_rcvport.packet_flit2 = hdr1;
7733 }
7734 switch (info) {
7735 case 1:
7736 case 2:
7737 case 3:
7738 case 4:
7739 case 5:
7740 case 6:
7741 case 7:
7742 case 9:
7743 case 11:
7744 case 12:
7745 extra = port_rcv_txt[info];
7746 break;
7747 default:
7748 reason_valid = 0;
7749 snprintf(buf, sizeof(buf), "reserved%lld", info);
7750 extra = buf;
7751 break;
7752 }
7753
7754 if (reason_valid && !do_bounce) {
7755 do_bounce = ppd->port_error_action &
7756 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7757 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7758 }
7759
7760 /* just report this */
7761 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7762 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7763 hdr0, hdr1);
7764
7765 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7766 }
7767
7768 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7769 /* informative only */
7770 dd_dev_info(dd, "8051 access to LCB blocked\n");
7771 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7772 }
7773 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7774 /* informative only */
7775 dd_dev_info(dd, "host access to LCB blocked\n");
7776 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7777 }
7778
7779 /* report any remaining errors */
7780 if (reg)
7781 dd_dev_info(dd, "DCC Error: %s\n",
7782 dcc_err_string(buf, sizeof(buf), reg));
7783
7784 if (lcl_reason == 0)
7785 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7786
7787 if (do_bounce) {
7788 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7789 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7790 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7791 }
7792}
7793
7794static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7795{
7796 char buf[96];
7797
7798 dd_dev_info(dd, "LCB Error: %s\n",
7799 lcb_err_string(buf, sizeof(buf), reg));
7800}
7801
7802/*
7803 * CCE block DC interrupt. Source is < 8.
7804 */
7805static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7806{
7807 const struct err_reg_info *eri = &dc_errs[source];
7808
7809 if (eri->handler) {
7810 interrupt_clear_down(dd, 0, eri);
7811 } else if (source == 3 /* dc_lbm_int */) {
7812 /*
7813 * This indicates that a parity error has occurred on the
7814 * address/control lines presented to the LBM. The error
7815 * is a single pulse, there is no associated error flag,
7816 * and it is non-maskable. This is because if a parity
7817 * error occurs on the request the request is dropped.
7818 * This should never occur, but it is nice to know if it
7819 * ever does.
7820 */
7821 dd_dev_err(dd, "Parity error in DC LBM block\n");
7822 } else {
7823 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7824 }
7825}
7826
7827/*
7828 * TX block send credit interrupt. Source is < 160.
7829 */
7830static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7831{
7832 sc_group_release_update(dd, source);
7833}
7834
7835/*
7836 * TX block SDMA interrupt. Source is < 48.
7837 *
7838 * SDMA interrupts are grouped by type:
7839 *
7840 * 0 - N-1 = SDma
7841 * N - 2N-1 = SDmaProgress
7842 * 2N - 3N-1 = SDmaIdle
7843 */
7844static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7845{
7846 /* what interrupt */
7847 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7848 /* which engine */
7849 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7850
7851#ifdef CONFIG_SDMA_VERBOSITY
7852 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7853 slashstrip(__FILE__), __LINE__, __func__);
7854 sdma_dumpstate(&dd->per_sdma[which]);
7855#endif
7856
7857 if (likely(what < 3 && which < dd->num_sdma)) {
7858 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7859 } else {
7860 /* should not happen */
7861 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7862 }
7863}
7864
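/*
 * Illustrative decode of an SDMA interrupt source (reading aid only): with
 * N = TXE_NUM_SDMA_ENGINES, a source of N + 3 gives what = 1 (SDmaProgress)
 * and which = 3, so the progress interrupt is delivered to engine 3 as
 * sdma_engine_interrupt(&dd->per_sdma[3], 1ull << (N + 3)).
 */
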
7865/*
7866 * RX block receive available interrupt. Source is < 160.
7867 */
7868static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7869{
7870 struct hfi1_ctxtdata *rcd;
7871 char *err_detail;
7872
7873 if (likely(source < dd->num_rcv_contexts)) {
7874 rcd = dd->rcd[source];
7875 if (rcd) {
7876 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007877 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007878 else
7879 handle_user_interrupt(rcd);
7880 return; /* OK */
7881 }
7882 /* received an interrupt, but no rcd */
7883 err_detail = "dataless";
7884 } else {
7885 /* received an interrupt, but are not using that context */
7886 err_detail = "out of range";
7887 }
7888 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7889 err_detail, source);
7890}
7891
7892/*
7893 * RX block receive urgent interrupt. Source is < 160.
7894 */
7895static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7896{
7897 struct hfi1_ctxtdata *rcd;
7898 char *err_detail;
7899
7900 if (likely(source < dd->num_rcv_contexts)) {
7901 rcd = dd->rcd[source];
7902 if (rcd) {
7903 /* only pay attention to user urgent interrupts */
7904 if (source >= dd->first_user_ctxt)
7905 handle_user_interrupt(rcd);
7906 return; /* OK */
7907 }
7908 /* received an interrupt, but no rcd */
7909 err_detail = "dataless";
7910 } else {
7911 /* received an interrupt, but are not using that context */
7912 err_detail = "out of range";
7913 }
7914 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7915 err_detail, source);
7916}
7917
7918/*
7919 * Reserved range interrupt. Should not be called in normal operation.
7920 */
7921static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7922{
7923 char name[64];
7924
7925 dd_dev_err(dd, "unexpected %s interrupt\n",
7926 is_reserved_name(name, sizeof(name), source));
7927}
7928
7929static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08007930/*
7931 * start end
7932 * name func interrupt func
7933 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007934{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7935 is_misc_err_name, is_misc_err_int },
7936{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7937 is_sdma_eng_err_name, is_sdma_eng_err_int },
7938{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7939 is_sendctxt_err_name, is_sendctxt_err_int },
7940{ IS_SDMA_START, IS_SDMA_END,
7941 is_sdma_eng_name, is_sdma_eng_int },
7942{ IS_VARIOUS_START, IS_VARIOUS_END,
7943 is_various_name, is_various_int },
7944{ IS_DC_START, IS_DC_END,
7945 is_dc_name, is_dc_int },
7946{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7947 is_rcv_avail_name, is_rcv_avail_int },
7948{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7949 is_rcv_urgent_name, is_rcv_urgent_int },
7950{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7951 is_send_credit_name, is_send_credit_int},
7952{ IS_RESERVED_START, IS_RESERVED_END,
7953 is_reserved_name, is_reserved_int},
7954};
7955
7956/*
7957 * Interrupt source interrupt - called when the given source has an interrupt.
7958 * Source is a bit index into an array of 64-bit integers.
7959 */
7960static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7961{
7962 const struct is_table *entry;
7963
7964 /* avoids a double compare by walking the table in-order */
7965 for (entry = &is_table[0]; entry->is_name; entry++) {
7966 if (source < entry->end) {
7967 trace_hfi1_interrupt(dd, entry, source);
7968 entry->is_int(dd, source - entry->start);
7969 return;
7970 }
7971 }
7972 /* fell off the end */
7973 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7974}
7975
7976/*
7977 * General interrupt handler. This is able to correctly handle
7978 * all interrupts in case INTx is used.
7979 */
7980static irqreturn_t general_interrupt(int irq, void *data)
7981{
7982 struct hfi1_devdata *dd = data;
7983 u64 regs[CCE_NUM_INT_CSRS];
7984 u32 bit;
7985 int i;
7986
7987 this_cpu_inc(*dd->int_counter);
7988
7989 /* phase 1: scan and clear all handled interrupts */
7990 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7991 if (dd->gi_mask[i] == 0) {
7992 regs[i] = 0; /* used later */
7993 continue;
7994 }
7995 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7996 dd->gi_mask[i];
7997 /* only clear if anything is set */
7998 if (regs[i])
7999 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8000 }
8001
8002 /* phase 2: call the appropriate handler */
8003 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John8638b772016-02-14 20:19:24 -08008004 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008005 is_interrupt(dd, bit);
8006 }
8007
8008 return IRQ_HANDLED;
8009}
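/*
 * Flow summary for the INTx path above (descriptive only): phase 1
 * snapshots every unmasked CCE_INT_STATUS CSR and writes the snapshot
 * back to CCE_INT_CLEAR so nothing is acknowledged that was not seen;
 * phase 2 then walks the snapshot with for_each_set_bit() and hands
 * each set bit to is_interrupt(), which maps it to a handler via
 * is_table[].
 */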
8010
8011static irqreturn_t sdma_interrupt(int irq, void *data)
8012{
8013 struct sdma_engine *sde = data;
8014 struct hfi1_devdata *dd = sde->dd;
8015 u64 status;
8016
8017#ifdef CONFIG_SDMA_VERBOSITY
8018 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8019 slashstrip(__FILE__), __LINE__, __func__);
8020 sdma_dumpstate(sde);
8021#endif
8022
8023 this_cpu_inc(*dd->int_counter);
8024
8025 /* This read_csr is really bad in the hot path */
8026 status = read_csr(dd,
Jubin John8638b772016-02-14 20:19:24 -08008027 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008028 & sde->imask;
8029 if (likely(status)) {
8030 /* clear the interrupt(s) */
8031 write_csr(dd,
Jubin John8638b772016-02-14 20:19:24 -08008032 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
Mike Marciniszyn77241052015-07-30 15:17:43 -04008033 status);
8034
8035 /* handle the interrupt(s) */
8036 sdma_engine_interrupt(sde, status);
8037 } else
8038 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8039 sde->this_idx);
8040
8041 return IRQ_HANDLED;
8042}
8043
8044/*
Dean Luickecd42f82016-02-03 14:35:14 -08008045 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8046	 * to ensure that the write completed. This does NOT guarantee that
8047 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008048 */
8049static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8050{
8051 struct hfi1_devdata *dd = rcd->dd;
8052 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8053
8054 mmiowb(); /* make sure everything before is written */
8055 write_csr(dd, addr, rcd->imask);
8056 /* force the above write on the chip and get a value back */
8057 (void)read_csr(dd, addr);
8058}
8059
8060/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008061void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008062{
8063 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8064}
8065
Dean Luickecd42f82016-02-03 14:35:14 -08008066/*
8067 * Return non-zero if a packet is present.
8068 *
8069 * This routine is called when rechecking for packets after the RcvAvail
8070 * interrupt has been cleared down. First, do a quick check of memory for
8071 * a packet present. If not found, use an expensive CSR read of the context
8072 * tail to determine the actual tail. The CSR read is necessary because there
8073 * is no method to push pending DMAs to memory other than an interrupt and we
8074 * are trying to determine if we need to force an interrupt.
8075 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008076static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8077{
Dean Luickecd42f82016-02-03 14:35:14 -08008078 u32 tail;
8079 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008080
Dean Luickecd42f82016-02-03 14:35:14 -08008081 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8082 present = (rcd->seq_cnt ==
8083 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8084 else /* is RDMA rtail */
8085 present = (rcd->head != get_rcvhdrtail(rcd));
8086
8087 if (present)
8088 return 1;
8089
8090	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8091 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8092 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008093}
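/*
 * Note on the recheck above (descriptive only): without DMA_RTAIL a
 * packet is "present" when the RHF sequence number at the software
 * head matches rcd->seq_cnt; with DMA_RTAIL, rcd->head is compared
 * against the DMA'd tail.  Only when the cheap memory check says
 * "empty" do we pay for the RCV_HDR_TAIL CSR read, which reports the
 * true tail regardless of the DMA_RTAIL setting.
 */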
8094
8095/*
8096 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8097 * This routine will try to handle packets immediately (latency), but if
8098	 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008099 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008100 * invoked) is finished. The intent is to avoid extra interrupts while we
8101 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008102 */
8103static irqreturn_t receive_context_interrupt(int irq, void *data)
8104{
8105 struct hfi1_ctxtdata *rcd = data;
8106 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008107 int disposition;
8108 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008109
8110 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8111 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008112 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008113
Dean Luickf4f30031c2015-10-26 10:28:44 -04008114 /* receive interrupt remains blocked while processing packets */
8115 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008116
Dean Luickf4f30031c2015-10-26 10:28:44 -04008117 /*
8118 * Too many packets were seen while processing packets in this
8119 * IRQ handler. Invoke the handler thread. The receive interrupt
8120 * remains blocked.
8121 */
8122 if (disposition == RCV_PKT_LIMIT)
8123 return IRQ_WAKE_THREAD;
8124
8125 /*
8126 * The packet processor detected no more packets. Clear the receive
8127	 * interrupt and recheck for a packet that may have arrived
8128 * after the previous check and interrupt clear. If a packet arrived,
8129 * force another interrupt.
8130 */
8131 clear_recv_intr(rcd);
8132 present = check_packet_present(rcd);
8133 if (present)
8134 force_recv_intr(rcd);
8135
8136 return IRQ_HANDLED;
8137}
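/*
 * Why clear, recheck, then force (descriptive only): a packet that
 * lands after do_interrupt() drained the queue but before
 * clear_recv_intr() completes would otherwise sit unnoticed with the
 * interrupt already cleared.  Rechecking after the clear and forcing
 * a fresh interrupt through CCE_INT_FORCE closes that window.
 */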
8138
8139/*
8140 * Receive packet thread handler. This expects to be invoked with the
8141 * receive interrupt still blocked.
8142 */
8143static irqreturn_t receive_context_thread(int irq, void *data)
8144{
8145 struct hfi1_ctxtdata *rcd = data;
8146 int present;
8147
8148 /* receive interrupt is still blocked from the IRQ handler */
8149 (void)rcd->do_interrupt(rcd, 1);
8150
8151 /*
8152 * The packet processor will only return if it detected no more
8153 * packets. Hold IRQs here so we can safely clear the interrupt and
8154 * recheck for a packet that may have arrived after the previous
8155 * check and the interrupt clear. If a packet arrived, force another
8156 * interrupt.
8157 */
8158 local_irq_disable();
8159 clear_recv_intr(rcd);
8160 present = check_packet_present(rcd);
8161 if (present)
8162 force_recv_intr(rcd);
8163 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008164
8165 return IRQ_HANDLED;
8166}
8167
8168/* ========================================================================= */
8169
8170u32 read_physical_state(struct hfi1_devdata *dd)
8171{
8172 u64 reg;
8173
8174 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8175 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8176 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8177}
8178
Jim Snowfb9036d2016-01-11 18:32:21 -05008179u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008180{
8181 u64 reg;
8182
8183 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8184 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8185 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8186}
8187
8188static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8189{
8190 u64 reg;
8191
8192 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8193 /* clear current state, set new state */
8194 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8195 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8196 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8197}
8198
8199/*
8200 * Use the 8051 to read a LCB CSR.
8201 */
8202static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8203{
8204 u32 regno;
8205 int ret;
8206
8207 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8208 if (acquire_lcb_access(dd, 0) == 0) {
8209 *data = read_csr(dd, addr);
8210 release_lcb_access(dd, 0);
8211 return 0;
8212 }
8213 return -EBUSY;
8214 }
8215
8216 /* register is an index of LCB registers: (offset - base) / 8 */
8217 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8218 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8219 if (ret != HCMD_SUCCESS)
8220 return -EBUSY;
8221 return 0;
8222}
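/*
 * Illustrative index computation for the 8051 path above: LCB CSRs
 * are 8 bytes apart, so an address 0x28 bytes past DC_LCB_CFG_RUN
 * (a made-up offset, for example only) yields regno = 0x28 >> 3 = 5
 * for the HCMD_READ_LCB_CSR command.
 */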
8223
8224/*
8225 * Read an LCB CSR. Access may not be in host control, so check.
8226 * Return 0 on success, -EBUSY on failure.
8227 */
8228int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8229{
8230 struct hfi1_pportdata *ppd = dd->pport;
8231
8232 /* if up, go through the 8051 for the value */
8233 if (ppd->host_link_state & HLS_UP)
8234 return read_lcb_via_8051(dd, addr, data);
8235 /* if going up or down, no access */
8236 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8237 return -EBUSY;
8238 /* otherwise, host has access */
8239 *data = read_csr(dd, addr);
8240 return 0;
8241}
8242
8243/*
8244 * Use the 8051 to write a LCB CSR.
8245 */
8246static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8247{
Dean Luick3bf40d62015-11-06 20:07:04 -05008248 u32 regno;
8249 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008250
Dean Luick3bf40d62015-11-06 20:07:04 -05008251 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8252 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8253 if (acquire_lcb_access(dd, 0) == 0) {
8254 write_csr(dd, addr, data);
8255 release_lcb_access(dd, 0);
8256 return 0;
8257 }
8258 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008259 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008260
8261 /* register is an index of LCB registers: (offset - base) / 8 */
8262 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8263 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8264 if (ret != HCMD_SUCCESS)
8265 return -EBUSY;
8266 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008267}
8268
8269/*
8270 * Write an LCB CSR. Access may not be in host control, so check.
8271 * Return 0 on success, -EBUSY on failure.
8272 */
8273int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8274{
8275 struct hfi1_pportdata *ppd = dd->pport;
8276
8277 /* if up, go through the 8051 for the value */
8278 if (ppd->host_link_state & HLS_UP)
8279 return write_lcb_via_8051(dd, addr, data);
8280 /* if going up or down, no access */
8281 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8282 return -EBUSY;
8283 /* otherwise, host has access */
8284 write_csr(dd, addr, data);
8285 return 0;
8286}
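/*
 * Usage sketch for the two accessors above (hypothetical caller, not
 * part of this driver): while the link is up the 8051 owns the LCB,
 * so a diagnostic read goes through the firmware path transparently:
 *
 *	u64 val;
 *
 *	if (read_lcb_csr(dd, DC_LCB_CFG_RUN, &val) == 0)
 *		dd_dev_info(dd, "LCB run config 0x%llx\n", val);
 *	else
 *		dd_dev_info(dd, "LCB busy, link state in transition\n");
 */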
8287
8288/*
8289 * Returns:
8290 * < 0 = Linux error, not able to get access
8291 * > 0 = 8051 command RETURN_CODE
8292 */
8293static int do_8051_command(
8294 struct hfi1_devdata *dd,
8295 u32 type,
8296 u64 in_data,
8297 u64 *out_data)
8298{
8299 u64 reg, completed;
8300 int return_code;
8301 unsigned long flags;
8302 unsigned long timeout;
8303
8304 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8305
8306 /*
8307 * Alternative to holding the lock for a long time:
8308 * - keep busy wait - have other users bounce off
8309 */
8310 spin_lock_irqsave(&dd->dc8051_lock, flags);
8311
8312 /* We can't send any commands to the 8051 if it's in reset */
8313 if (dd->dc_shutdown) {
8314 return_code = -ENODEV;
8315 goto fail;
8316 }
8317
8318 /*
8319 * If an 8051 host command timed out previously, then the 8051 is
8320 * stuck.
8321 *
8322 * On first timeout, attempt to reset and restart the entire DC
8323 * block (including 8051). (Is this too big of a hammer?)
8324 *
8325 * If the 8051 times out a second time, the reset did not bring it
8326 * back to healthy life. In that case, fail any subsequent commands.
8327 */
8328 if (dd->dc8051_timed_out) {
8329 if (dd->dc8051_timed_out > 1) {
8330 dd_dev_err(dd,
8331 "Previous 8051 host command timed out, skipping command %u\n",
8332 type);
8333 return_code = -ENXIO;
8334 goto fail;
8335 }
8336 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8337 dc_shutdown(dd);
8338 dc_start(dd);
8339 spin_lock_irqsave(&dd->dc8051_lock, flags);
8340 }
8341
8342 /*
8343 * If there is no timeout, then the 8051 command interface is
8344 * waiting for a command.
8345 */
8346
8347 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008348	 * When writing an LCB CSR, out_data contains the full value to
8349	 * be written, while in_data contains the relative LCB
8350	 * address in 7:0. Do the work here, rather than in the caller,
8351	 * of distributing the write data to where it needs to go:
8352 *
8353 * Write data
8354 * 39:00 -> in_data[47:8]
8355 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8356 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8357 */
8358 if (type == HCMD_WRITE_LCB_CSR) {
8359 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8360 reg = ((((*out_data) >> 40) & 0xff) <<
8361 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8362 | ((((*out_data) >> 48) & 0xffff) <<
8363 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8364 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8365 }
8366
8367 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008368 * Do two writes: the first to stabilize the type and req_data, the
8369 * second to activate.
8370 */
8371 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8372 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8373 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8374 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8375 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8376 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8377 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8378
8379 /* wait for completion, alternate: interrupt */
8380 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8381 while (1) {
8382 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8383 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8384 if (completed)
8385 break;
8386 if (time_after(jiffies, timeout)) {
8387 dd->dc8051_timed_out++;
8388 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8389 if (out_data)
8390 *out_data = 0;
8391 return_code = -ETIMEDOUT;
8392 goto fail;
8393 }
8394 udelay(2);
8395 }
8396
8397 if (out_data) {
8398 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8399 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8400 if (type == HCMD_READ_LCB_CSR) {
8401 /* top 16 bits are in a different register */
8402 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8403 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8404 << (48
8405 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8406 }
8407 }
8408 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8409 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8410 dd->dc8051_timed_out = 0;
8411 /*
8412 * Clear command for next user.
8413 */
8414 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8415
8416fail:
8417 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8418
8419 return return_code;
8420}
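/*
 * Worked example of the HCMD_WRITE_LCB_CSR packing above (illustrative
 * only): for a 64-bit write value V and relative LCB address A,
 * in_data carries A in bits 7:0 and V[39:0] in bits 47:8, while
 * V[47:40] and V[63:48] are staged in DC_DC8051_CFG_EXT_DEV_0 as the
 * RETURN_CODE and RSP_DATA fields before the command is issued.
 */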
8421
8422static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8423{
8424 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8425}
8426
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008427int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8428 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008429{
8430 u64 data;
8431 int ret;
8432
8433 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8434 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8435 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8436 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8437 if (ret != HCMD_SUCCESS) {
8438 dd_dev_err(dd,
8439 "load 8051 config: field id %d, lane %d, err %d\n",
8440 (int)field_id, (int)lane_id, ret);
8441 }
8442 return ret;
8443}
8444
8445/*
8446 * Read the 8051 firmware "registers". Use the RAM directly. Always
8447 * set the result, even on error.
8448 * Return 0 on success, -errno on failure
8449 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008450int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8451 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008452{
8453 u64 big_data;
8454 u32 addr;
8455 int ret;
8456
8457 /* address start depends on the lane_id */
8458 if (lane_id < 4)
8459 addr = (4 * NUM_GENERAL_FIELDS)
8460 + (lane_id * 4 * NUM_LANE_FIELDS);
8461 else
8462 addr = 0;
8463 addr += field_id * 4;
8464
8465 /* read is in 8-byte chunks, hardware will truncate the address down */
8466 ret = read_8051_data(dd, addr, 8, &big_data);
8467
8468 if (ret == 0) {
8469 /* extract the 4 bytes we want */
8470 if (addr & 0x4)
8471 *result = (u32)(big_data >> 32);
8472 else
8473 *result = (u32)big_data;
8474 } else {
8475 *result = 0;
8476 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8477 __func__, lane_id, field_id);
8478 }
8479
8480 return ret;
8481}
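/*
 * Illustrative address computation for the read above: general fields
 * (lane_id >= 4) start at byte 0, and per-lane fields start after the
 * 4 * NUM_GENERAL_FIELDS general bytes.  For example, lane 1, field 2
 * maps to addr = 4 * NUM_GENERAL_FIELDS + 1 * 4 * NUM_LANE_FIELDS + 8;
 * since the RAM is read in 8-byte chunks, bit 2 of the address then
 * selects the upper or lower 4 bytes of that chunk.
 */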
8482
8483static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8484 u8 continuous)
8485{
8486 u32 frame;
8487
8488 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8489 | power_management << POWER_MANAGEMENT_SHIFT;
8490 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8491 GENERAL_CONFIG, frame);
8492}
8493
8494static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8495 u16 vl15buf, u8 crc_sizes)
8496{
8497 u32 frame;
8498
8499 frame = (u32)vau << VAU_SHIFT
8500 | (u32)z << Z_SHIFT
8501 | (u32)vcu << VCU_SHIFT
8502 | (u32)vl15buf << VL15BUF_SHIFT
8503 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8504 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8505 GENERAL_CONFIG, frame);
8506}
8507
8508static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8509 u8 *flag_bits, u16 *link_widths)
8510{
8511 u32 frame;
8512
8513 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8514 &frame);
8515 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8516 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8517 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8518}
8519
8520static int write_vc_local_link_width(struct hfi1_devdata *dd,
8521 u8 misc_bits,
8522 u8 flag_bits,
8523 u16 link_widths)
8524{
8525 u32 frame;
8526
8527 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8528 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8529 | (u32)link_widths << LINK_WIDTH_SHIFT;
8530 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8531 frame);
8532}
8533
8534static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8535 u8 device_rev)
8536{
8537 u32 frame;
8538
8539 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8540 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8541 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8542}
8543
8544static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8545 u8 *device_rev)
8546{
8547 u32 frame;
8548
8549 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8550 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8551 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8552 & REMOTE_DEVICE_REV_MASK;
8553}
8554
8555void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8556{
8557 u32 frame;
8558
8559 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8560 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8561 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8562}
8563
8564static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8565 u8 *continuous)
8566{
8567 u32 frame;
8568
8569 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8570 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8571 & POWER_MANAGEMENT_MASK;
8572 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8573 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8574}
8575
8576static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8577 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8578{
8579 u32 frame;
8580
8581 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8582 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8583 *z = (frame >> Z_SHIFT) & Z_MASK;
8584 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8585 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8586 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8587}
8588
8589static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8590 u8 *remote_tx_rate,
8591 u16 *link_widths)
8592{
8593 u32 frame;
8594
8595 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8596 &frame);
8597 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8598 & REMOTE_TX_RATE_MASK;
8599 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8600}
8601
8602static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8603{
8604 u32 frame;
8605
8606 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8607 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8608}
8609
8610static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8611{
8612 u32 frame;
8613
8614 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8615 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8616}
8617
8618static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8619{
8620 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8621}
8622
8623static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8624{
8625 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8626}
8627
8628void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8629{
8630 u32 frame;
8631 int ret;
8632
8633 *link_quality = 0;
8634 if (dd->pport->host_link_state & HLS_UP) {
8635 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8636 &frame);
8637 if (ret == 0)
8638 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8639 & LINK_QUALITY_MASK;
8640 }
8641}
8642
8643static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8644{
8645 u32 frame;
8646
8647 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8648 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8649}
8650
8651static int read_tx_settings(struct hfi1_devdata *dd,
8652 u8 *enable_lane_tx,
8653 u8 *tx_polarity_inversion,
8654 u8 *rx_polarity_inversion,
8655 u8 *max_rate)
8656{
8657 u32 frame;
8658 int ret;
8659
8660 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8661 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8662 & ENABLE_LANE_TX_MASK;
8663 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8664 & TX_POLARITY_INVERSION_MASK;
8665 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8666 & RX_POLARITY_INVERSION_MASK;
8667 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8668 return ret;
8669}
8670
8671static int write_tx_settings(struct hfi1_devdata *dd,
8672 u8 enable_lane_tx,
8673 u8 tx_polarity_inversion,
8674 u8 rx_polarity_inversion,
8675 u8 max_rate)
8676{
8677 u32 frame;
8678
8679 /* no need to mask, all variable sizes match field widths */
8680 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8681 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8682 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8683 | max_rate << MAX_RATE_SHIFT;
8684 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8685}
8686
8687static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8688{
8689 u32 frame, version, prod_id;
8690 int ret, lane;
8691
8692 /* 4 lanes */
8693 for (lane = 0; lane < 4; lane++) {
8694 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8695 if (ret) {
8696 dd_dev_err(
8697 dd,
8698 "Unable to read lane %d firmware details\n",
8699 lane);
8700 continue;
8701 }
8702 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8703 & SPICO_ROM_VERSION_MASK;
8704 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8705 & SPICO_ROM_PROD_ID_MASK;
8706 dd_dev_info(dd,
8707 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8708 lane, version, prod_id);
8709 }
8710}
8711
8712/*
8713 * Read an idle LCB message.
8714 *
8715 * Returns 0 on success, -EINVAL on error
8716 */
8717static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8718{
8719 int ret;
8720
8721 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8722 type, data_out);
8723 if (ret != HCMD_SUCCESS) {
8724 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8725 (u32)type, ret);
8726 return -EINVAL;
8727 }
8728 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8729 /* return only the payload as we already know the type */
8730 *data_out >>= IDLE_PAYLOAD_SHIFT;
8731 return 0;
8732}
8733
8734/*
8735 * Read an idle SMA message. To be done in response to a notification from
8736 * the 8051.
8737 *
8738 * Returns 0 on success, -EINVAL on error
8739 */
8740static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8741{
8742 return read_idle_message(dd,
8743 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8744}
8745
8746/*
8747 * Send an idle LCB message.
8748 *
8749 * Returns 0 on success, -EINVAL on error
8750 */
8751static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8752{
8753 int ret;
8754
8755 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8756 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8757 if (ret != HCMD_SUCCESS) {
8758 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8759 data, ret);
8760 return -EINVAL;
8761 }
8762 return 0;
8763}
8764
8765/*
8766 * Send an idle SMA message.
8767 *
8768 * Returns 0 on success, -EINVAL on error
8769 */
8770int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8771{
8772 u64 data;
8773
8774 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8775 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8776 return send_idle_message(dd, data);
8777}
8778
8779/*
8780 * Initialize the LCB then do a quick link up. This may or may not be
8781 * in loopback.
8782 *
8783 * return 0 on success, -errno on error
8784 */
8785static int do_quick_linkup(struct hfi1_devdata *dd)
8786{
8787 u64 reg;
8788 unsigned long timeout;
8789 int ret;
8790
8791 lcb_shutdown(dd, 0);
8792
8793 if (loopback) {
8794 /* LCB_CFG_LOOPBACK.VAL = 2 */
8795 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8796 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8797 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8798 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8799 }
8800
8801 /* start the LCBs */
8802 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8803 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8804
8805 /* simulator only loopback steps */
8806 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8807 /* LCB_CFG_RUN.EN = 1 */
8808 write_csr(dd, DC_LCB_CFG_RUN,
8809 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8810
8811 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8812 timeout = jiffies + msecs_to_jiffies(10);
8813 while (1) {
8814 reg = read_csr(dd,
8815 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8816 if (reg)
8817 break;
8818 if (time_after(jiffies, timeout)) {
8819 dd_dev_err(dd,
8820 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8821 return -ETIMEDOUT;
8822 }
8823 udelay(2);
8824 }
8825
8826 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8827 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8828 }
8829
8830 if (!loopback) {
8831 /*
8832 * When doing quick linkup and not in loopback, both
8833 * sides must be done with LCB set-up before either
8834 * starts the quick linkup. Put a delay here so that
8835 * both sides can be started and have a chance to be
8836 * done with LCB set up before resuming.
8837 */
8838 dd_dev_err(dd,
8839 "Pausing for peer to be finished with LCB set up\n");
8840 msleep(5000);
8841 dd_dev_err(dd,
8842 "Continuing with quick linkup\n");
8843 }
8844
8845 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8846 set_8051_lcb_access(dd);
8847
8848 /*
8849 * State "quick" LinkUp request sets the physical link state to
8850 * LinkUp without a verify capability sequence.
8851 * This state is in simulator v37 and later.
8852 */
8853 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8854 if (ret != HCMD_SUCCESS) {
8855 dd_dev_err(dd,
8856 "%s: set physical link state to quick LinkUp failed with return %d\n",
8857 __func__, ret);
8858
8859 set_host_lcb_access(dd);
8860 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8861
8862 if (ret >= 0)
8863 ret = -EINVAL;
8864 return ret;
8865 }
8866
8867 return 0; /* success */
8868}
8869
8870/*
8871 * Set the SerDes to internal loopback mode.
8872 * Returns 0 on success, -errno on error.
8873 */
8874static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8875{
8876 int ret;
8877
8878 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8879 if (ret == HCMD_SUCCESS)
8880 return 0;
8881 dd_dev_err(dd,
8882 "Set physical link state to SerDes Loopback failed with return %d\n",
8883 ret);
8884 if (ret >= 0)
8885 ret = -EINVAL;
8886 return ret;
8887}
8888
8889/*
8890 * Do all special steps to set up loopback.
8891 */
8892static int init_loopback(struct hfi1_devdata *dd)
8893{
8894 dd_dev_info(dd, "Entering loopback mode\n");
8895
8896 /* all loopbacks should disable self GUID check */
8897 write_csr(dd, DC_DC8051_CFG_MODE,
8898 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8899
8900 /*
8901 * The simulator has only one loopback option - LCB. Switch
8902 * to that option, which includes quick link up.
8903 *
8904 * Accept all valid loopback values.
8905 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08008906 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8907 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8908 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008909 loopback = LOOPBACK_LCB;
8910 quick_linkup = 1;
8911 return 0;
8912 }
8913
8914 /* handle serdes loopback */
8915 if (loopback == LOOPBACK_SERDES) {
8916		/* internal serdes loopback needs quick linkup on RTL */
8917 if (dd->icode == ICODE_RTL_SILICON)
8918 quick_linkup = 1;
8919 return set_serdes_loopback_mode(dd);
8920 }
8921
8922 /* LCB loopback - handled at poll time */
8923 if (loopback == LOOPBACK_LCB) {
8924 quick_linkup = 1; /* LCB is always quick linkup */
8925
8926 /* not supported in emulation due to emulation RTL changes */
8927 if (dd->icode == ICODE_FPGA_EMULATION) {
8928 dd_dev_err(dd,
8929 "LCB loopback not supported in emulation\n");
8930 return -EINVAL;
8931 }
8932 return 0;
8933 }
8934
8935 /* external cable loopback requires no extra steps */
8936 if (loopback == LOOPBACK_CABLE)
8937 return 0;
8938
8939 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8940 return -EINVAL;
8941}
8942
8943/*
8944 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8945 * used in the Verify Capability link width attribute.
8946 */
8947static u16 opa_to_vc_link_widths(u16 opa_widths)
8948{
8949 int i;
8950 u16 result = 0;
8951
8952 static const struct link_bits {
8953 u16 from;
8954 u16 to;
8955 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08008956 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
8957 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
8958 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
8959 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04008960 };
8961
8962 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8963 if (opa_widths & opa_link_xlate[i].from)
8964 result |= opa_link_xlate[i].to;
8965 }
8966 return result;
8967}
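/*
 * Illustrative example of the translation above: an FM-supplied mask
 * of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X becomes the VC mask
 * (1 << 0) | (1 << 3) = 0x9, i.e. one bit per width with bit N-1
 * standing for an N-lane link.
 */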
8968
8969/*
8970 * Set link attributes before moving to polling.
8971 */
8972static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8973{
8974 struct hfi1_devdata *dd = ppd->dd;
8975 u8 enable_lane_tx;
8976 u8 tx_polarity_inversion;
8977 u8 rx_polarity_inversion;
8978 int ret;
8979
8980 /* reset our fabric serdes to clear any lingering problems */
8981 fabric_serdes_reset(dd);
8982
8983 /* set the local tx rate - need to read-modify-write */
8984 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8985 &rx_polarity_inversion, &ppd->local_tx_rate);
8986 if (ret)
8987 goto set_local_link_attributes_fail;
8988
8989 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8990 /* set the tx rate to the fastest enabled */
8991 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8992 ppd->local_tx_rate = 1;
8993 else
8994 ppd->local_tx_rate = 0;
8995 } else {
8996 /* set the tx rate to all enabled */
8997 ppd->local_tx_rate = 0;
8998 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8999 ppd->local_tx_rate |= 2;
9000 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9001 ppd->local_tx_rate |= 1;
9002 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009003
9004 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009005 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9006 rx_polarity_inversion, ppd->local_tx_rate);
9007 if (ret != HCMD_SUCCESS)
9008 goto set_local_link_attributes_fail;
9009
9010 /*
9011 * DC supports continuous updates.
9012 */
9013 ret = write_vc_local_phy(dd, 0 /* no power management */,
9014 1 /* continuous updates */);
9015 if (ret != HCMD_SUCCESS)
9016 goto set_local_link_attributes_fail;
9017
9018 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9019 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9020 ppd->port_crc_mode_enabled);
9021 if (ret != HCMD_SUCCESS)
9022 goto set_local_link_attributes_fail;
9023
9024 ret = write_vc_local_link_width(dd, 0, 0,
9025 opa_to_vc_link_widths(ppd->link_width_enabled));
9026 if (ret != HCMD_SUCCESS)
9027 goto set_local_link_attributes_fail;
9028
9029 /* let peer know who we are */
9030 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9031 if (ret == HCMD_SUCCESS)
9032 return 0;
9033
9034set_local_link_attributes_fail:
9035 dd_dev_err(dd,
9036 "Failed to set local link attributes, return 0x%x\n",
9037 ret);
9038 return ret;
9039}
9040
9041/*
9042 * Call this to start the link. Schedule a retry if the cable is not
9043 * present or if unable to start polling. Do not do anything if the
9044 * link is disabled. Returns 0 if link is disabled or moved to polling
9045 */
9046int start_link(struct hfi1_pportdata *ppd)
9047{
9048 if (!ppd->link_enabled) {
9049 dd_dev_info(ppd->dd,
9050 "%s: stopping link start because link is disabled\n",
9051 __func__);
9052 return 0;
9053 }
9054 if (!ppd->driver_link_ready) {
9055 dd_dev_info(ppd->dd,
9056 "%s: stopping link start because driver is not ready\n",
9057 __func__);
9058 return 0;
9059 }
9060
9061 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9062 loopback == LOOPBACK_LCB ||
9063 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9064 return set_link_state(ppd, HLS_DN_POLL);
9065
9066 dd_dev_info(ppd->dd,
9067 "%s: stopping link start because no cable is present\n",
9068 __func__);
9069 return -EAGAIN;
9070}
9071
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009072static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9073{
9074 struct hfi1_devdata *dd = ppd->dd;
9075 u64 mask;
9076 unsigned long timeout;
9077
9078 /*
9079 * Check for QSFP interrupt for t_init (SFF 8679)
9080 */
9081 timeout = jiffies + msecs_to_jiffies(2000);
9082 while (1) {
9083 mask = read_csr(dd, dd->hfi1_id ?
9084 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9085 if (!(mask & QSFP_HFI0_INT_N)) {
9086 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9087 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9088 break;
9089 }
9090 if (time_after(jiffies, timeout)) {
9091 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9092 __func__);
9093 break;
9094 }
9095 udelay(2);
9096 }
9097}
9098
9099static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9100{
9101 struct hfi1_devdata *dd = ppd->dd;
9102 u64 mask;
9103
9104 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9105 if (enable)
9106 mask |= (u64)QSFP_HFI0_INT_N;
9107 else
9108 mask &= ~(u64)QSFP_HFI0_INT_N;
9109 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9110}
9111
9112void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009113{
9114 struct hfi1_devdata *dd = ppd->dd;
9115 u64 mask, qsfp_mask;
9116
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009117 /* Disable INT_N from triggering QSFP interrupts */
9118 set_qsfp_int_n(ppd, 0);
9119
9120 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009121 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009122 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009123 qsfp_mask |= mask;
9124 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009125 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009126
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009127 qsfp_mask = read_csr(dd, dd->hfi1_id ?
9128 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009129 qsfp_mask &= ~mask;
9130 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009131 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009132
9133 udelay(10);
9134
9135 qsfp_mask |= mask;
9136 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009137 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9138
9139 wait_for_qsfp_init(ppd);
9140
9141 /*
9142 * Allow INT_N to trigger the QSFP interrupt to watch
9143 * for alarms and warnings
9144 */
9145 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009146}
9147
9148static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9149 u8 *qsfp_interrupt_status)
9150{
9151 struct hfi1_devdata *dd = ppd->dd;
9152
9153 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9154 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9155 dd_dev_info(dd,
9156 "%s: QSFP cable on fire\n",
9157 __func__);
9158
9159 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9160 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9161 dd_dev_info(dd,
9162 "%s: QSFP cable temperature too low\n",
9163 __func__);
9164
9165 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9166 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9167 dd_dev_info(dd,
9168 "%s: QSFP supply voltage too high\n",
9169 __func__);
9170
9171 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9172 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9173 dd_dev_info(dd,
9174 "%s: QSFP supply voltage too low\n",
9175 __func__);
9176
9177 /* Byte 2 is vendor specific */
9178
9179 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9180 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9181 dd_dev_info(dd,
9182 "%s: Cable RX channel 1/2 power too high\n",
9183 __func__);
9184
9185 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9186 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9187 dd_dev_info(dd,
9188 "%s: Cable RX channel 1/2 power too low\n",
9189 __func__);
9190
9191 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9192 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9193 dd_dev_info(dd,
9194 "%s: Cable RX channel 3/4 power too high\n",
9195 __func__);
9196
9197 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9198 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9199 dd_dev_info(dd,
9200 "%s: Cable RX channel 3/4 power too low\n",
9201 __func__);
9202
9203 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9204 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9205 dd_dev_info(dd,
9206 "%s: Cable TX channel 1/2 bias too high\n",
9207 __func__);
9208
9209 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9210 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9211 dd_dev_info(dd,
9212 "%s: Cable TX channel 1/2 bias too low\n",
9213 __func__);
9214
9215 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9216 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9217 dd_dev_info(dd,
9218 "%s: Cable TX channel 3/4 bias too high\n",
9219 __func__);
9220
9221 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9222 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9223 dd_dev_info(dd,
9224 "%s: Cable TX channel 3/4 bias too low\n",
9225 __func__);
9226
9227 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9228 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9229 dd_dev_info(dd,
9230 "%s: Cable TX channel 1/2 power too high\n",
9231 __func__);
9232
9233 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9234 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9235 dd_dev_info(dd,
9236 "%s: Cable TX channel 1/2 power too low\n",
9237 __func__);
9238
9239 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9240 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9241 dd_dev_info(dd,
9242 "%s: Cable TX channel 3/4 power too high\n",
9243 __func__);
9244
9245 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9246 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9247 dd_dev_info(dd,
9248 "%s: Cable TX channel 3/4 power too low\n",
9249 __func__);
9250
9251 /* Bytes 9-10 and 11-12 are reserved */
9252 /* Bytes 13-15 are vendor specific */
9253
9254 return 0;
9255}
9256
Mike Marciniszyn77241052015-07-30 15:17:43 -04009257/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009258void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009259{
9260 struct qsfp_data *qd;
9261 struct hfi1_pportdata *ppd;
9262 struct hfi1_devdata *dd;
9263
9264 qd = container_of(work, struct qsfp_data, qsfp_work);
9265 ppd = qd->ppd;
9266 dd = ppd->dd;
9267
9268 /* Sanity check */
9269 if (!qsfp_mod_present(ppd))
9270 return;
9271
9272 /*
9273	 * Turn DC back on after the cable has been
9274 * re-inserted. Up until now, the DC has been in
9275 * reset to save power.
9276 */
9277 dc_start(dd);
9278
9279 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009280 set_qsfp_int_n(ppd, 0);
9281
9282 wait_for_qsfp_init(ppd);
9283
9284 /*
9285 * Allow INT_N to trigger the QSFP interrupt to watch
9286 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009287 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009288 set_qsfp_int_n(ppd, 1);
9289
9290 tune_serdes(ppd);
9291
9292 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009293 }
9294
9295 if (qd->check_interrupt_flags) {
9296 u8 qsfp_interrupt_status[16] = {0,};
9297
9298 if (qsfp_read(ppd, dd->hfi1_id, 6,
9299 &qsfp_interrupt_status[0], 16) != 16) {
9300 dd_dev_info(dd,
9301 "%s: Failed to read status of QSFP module\n",
9302 __func__);
9303 } else {
9304 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009305
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009306 handle_qsfp_error_conditions(
9307 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009308 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9309 ppd->qsfp_info.check_interrupt_flags = 0;
9310 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9311 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009312 }
9313 }
9314}
9315
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009316static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009317{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009318 struct hfi1_pportdata *ppd = dd->pport;
9319 u64 qsfp_mask, cce_int_mask;
9320 const int qsfp1_int_smask = QSFP1_INT % 64;
9321 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009322
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009323 /*
9324 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9325 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9326 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9327 * the index of the appropriate CSR in the CCEIntMask CSR array
9328 */
9329 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9330 (8 * (QSFP1_INT / 64)));
9331 if (dd->hfi1_id) {
9332 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9333 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9334 cce_int_mask);
9335 } else {
9336 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9337 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9338 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009339 }
9340
Mike Marciniszyn77241052015-07-30 15:17:43 -04009341 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9342 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009343 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9344 qsfp_mask);
9345 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9346 qsfp_mask);
9347
9348 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009349
9350 /* Handle active low nature of INT_N and MODPRST_N pins */
9351 if (qsfp_mod_present(ppd))
9352 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9353 write_csr(dd,
9354 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9355 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009356}
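/*
 * Note on the CCE mask indexing above (descriptive only): the CCE
 * interrupt mask is an array of 64-bit CSRs, so interrupt number N
 * lives in CSR (N / 64) at bit (N % 64).  QSFP1_INT and QSFP2_INT sit
 * in the same CSR, which is why a single 8 * (QSFP1_INT / 64) byte
 * offset serves both and only the bit within the CSR differs.
 */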
9357
Dean Luickbbdeb332015-12-01 15:38:15 -05009358/*
9359 * Do a one-time initialize of the LCB block.
9360 */
9361static void init_lcb(struct hfi1_devdata *dd)
9362{
Dean Luicka59329d2016-02-03 14:32:31 -08009363 /* simulator does not correctly handle LCB cclk loopback, skip */
9364 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9365 return;
9366
Dean Luickbbdeb332015-12-01 15:38:15 -05009367 /* the DC has been reset earlier in the driver load */
9368
9369 /* set LCB for cclk loopback on the port */
9370 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9371 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9372 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9373 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9374 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9375 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9376 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9377}
9378
Mike Marciniszyn77241052015-07-30 15:17:43 -04009379int bringup_serdes(struct hfi1_pportdata *ppd)
9380{
9381 struct hfi1_devdata *dd = ppd->dd;
9382 u64 guid;
9383 int ret;
9384
9385 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9386 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9387
9388 guid = ppd->guid;
9389 if (!guid) {
9390 if (dd->base_guid)
9391 guid = dd->base_guid + ppd->port - 1;
9392 ppd->guid = guid;
9393 }
9394
Mike Marciniszyn77241052015-07-30 15:17:43 -04009395 /* Set linkinit_reason on power up per OPA spec */
9396 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9397
Dean Luickbbdeb332015-12-01 15:38:15 -05009398 /* one-time init of the LCB */
9399 init_lcb(dd);
9400
Mike Marciniszyn77241052015-07-30 15:17:43 -04009401 if (loopback) {
9402 ret = init_loopback(dd);
9403 if (ret < 0)
9404 return ret;
9405 }
9406
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009407 /* tune the SERDES to a ballpark setting for
9408	 * optimal signal and bit error rate.
9409 * Needs to be done before starting the link
9410 */
9411 tune_serdes(ppd);
9412
Mike Marciniszyn77241052015-07-30 15:17:43 -04009413 return start_link(ppd);
9414}
9415
9416void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9417{
9418 struct hfi1_devdata *dd = ppd->dd;
9419
9420 /*
9421	 * Shut down the link and keep it down. First clear the flag saying
9422	 * the driver wants to allow the link to be up (driver_link_ready).
9423 * Then make sure the link is not automatically restarted
9424 * (link_enabled). Cancel any pending restart. And finally
9425 * go offline.
9426 */
9427 ppd->driver_link_ready = 0;
9428 ppd->link_enabled = 0;
9429
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009430 ppd->offline_disabled_reason =
9431 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009432 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9433 OPA_LINKDOWN_REASON_SMA_DISABLED);
9434 set_link_state(ppd, HLS_DN_OFFLINE);
9435
9436 /* disable the port */
9437 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9438}
9439
9440static inline int init_cpu_counters(struct hfi1_devdata *dd)
9441{
9442 struct hfi1_pportdata *ppd;
9443 int i;
9444
9445 ppd = (struct hfi1_pportdata *)(dd + 1);
9446 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009447 ppd->ibport_data.rvp.rc_acks = NULL;
9448 ppd->ibport_data.rvp.rc_qacks = NULL;
9449 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9450 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9451 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9452 if (!ppd->ibport_data.rvp.rc_acks ||
9453 !ppd->ibport_data.rvp.rc_delayed_comp ||
9454 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009455 return -ENOMEM;
9456 }
9457
9458 return 0;
9459}
9460
9461static const char * const pt_names[] = {
9462 "expected",
9463 "eager",
9464 "invalid"
9465};
9466
9467static const char *pt_name(u32 type)
9468{
9469 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9470}
9471
9472/*
9473 * index is the index into the receive array
9474 */
9475void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9476 u32 type, unsigned long pa, u16 order)
9477{
9478 u64 reg;
9479 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9480 (dd->kregbase + RCV_ARRAY));
9481
9482 if (!(dd->flags & HFI1_PRESENT))
9483 goto done;
9484
9485 if (type == PT_INVALID) {
9486 pa = 0;
9487 } else if (type > PT_INVALID) {
9488 dd_dev_err(dd,
9489 "unexpected receive array type %u for index %u, not handled\n",
9490 type, index);
9491 goto done;
9492 }
9493
9494 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9495 pt_name(type), index, pa, (unsigned long)order);
9496
9497#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9498 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9499 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9500 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9501 << RCV_ARRAY_RT_ADDR_SHIFT;
9502 writeq(reg, base + (index * 8));
9503
9504 if (type == PT_EAGER)
9505 /*
9506 * Eager entries are written one-by-one so we have to push them
9507 * after we write the entry.
9508 */
9509 flush_wc();
9510done:
9511 return;
9512}
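/*
 * Illustrative encoding example for the RcvArray write above: an eager
 * buffer at physical address 0x1234000 with order 2 is written as
 * write-enable | (2 << RCV_ARRAY_RT_BUF_SIZE_SHIFT) |
 * ((0x1234000 >> RT_ADDR_SHIFT) << RCV_ARRAY_RT_ADDR_SHIFT), i.e. the
 * hardware stores the 4KB-aligned page frame number, not the raw
 * address.
 */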
9513
9514void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9515{
9516 struct hfi1_devdata *dd = rcd->dd;
9517 u32 i;
9518
9519 /* this could be optimized */
9520 for (i = rcd->eager_base; i < rcd->eager_base +
9521 rcd->egrbufs.alloced; i++)
9522 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9523
9524 for (i = rcd->expected_base;
9525 i < rcd->expected_base + rcd->expected_count; i++)
9526 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9527}
9528
9529int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9530 struct hfi1_ctxt_info *kinfo)
9531{
9532 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9533 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9534 return 0;
9535}
9536
9537struct hfi1_message_header *hfi1_get_msgheader(
9538 struct hfi1_devdata *dd, __le32 *rhf_addr)
9539{
9540 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9541
9542 return (struct hfi1_message_header *)
9543 (rhf_addr - dd->rhf_offset + offset);
9544}
9545
9546static const char * const ib_cfg_name_strings[] = {
9547 "HFI1_IB_CFG_LIDLMC",
9548 "HFI1_IB_CFG_LWID_DG_ENB",
9549 "HFI1_IB_CFG_LWID_ENB",
9550 "HFI1_IB_CFG_LWID",
9551 "HFI1_IB_CFG_SPD_ENB",
9552 "HFI1_IB_CFG_SPD",
9553 "HFI1_IB_CFG_RXPOL_ENB",
9554 "HFI1_IB_CFG_LREV_ENB",
9555 "HFI1_IB_CFG_LINKLATENCY",
9556 "HFI1_IB_CFG_HRTBT",
9557 "HFI1_IB_CFG_OP_VLS",
9558 "HFI1_IB_CFG_VL_HIGH_CAP",
9559 "HFI1_IB_CFG_VL_LOW_CAP",
9560 "HFI1_IB_CFG_OVERRUN_THRESH",
9561 "HFI1_IB_CFG_PHYERR_THRESH",
9562 "HFI1_IB_CFG_LINKDEFAULT",
9563 "HFI1_IB_CFG_PKEYS",
9564 "HFI1_IB_CFG_MTU",
9565 "HFI1_IB_CFG_LSTATE",
9566 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9567 "HFI1_IB_CFG_PMA_TICKS",
9568 "HFI1_IB_CFG_PORT"
9569};
9570
9571static const char *ib_cfg_name(int which)
9572{
9573 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9574 return "invalid";
9575 return ib_cfg_name_strings[which];
9576}
9577
9578int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9579{
9580 struct hfi1_devdata *dd = ppd->dd;
9581 int val = 0;
9582
9583 switch (which) {
9584 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9585 val = ppd->link_width_enabled;
9586 break;
9587 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9588 val = ppd->link_width_active;
9589 break;
9590 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9591 val = ppd->link_speed_enabled;
9592 break;
9593 case HFI1_IB_CFG_SPD: /* current Link speed */
9594 val = ppd->link_speed_active;
9595 break;
9596
9597 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9598 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9599 case HFI1_IB_CFG_LINKLATENCY:
9600 goto unimplemented;
9601
9602 case HFI1_IB_CFG_OP_VLS:
9603 val = ppd->vls_operational;
9604 break;
9605 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9606 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9607 break;
9608 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9609 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9610 break;
9611 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9612 val = ppd->overrun_threshold;
9613 break;
9614 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9615 val = ppd->phy_error_threshold;
9616 break;
9617 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9618 val = dd->link_default;
9619 break;
9620
9621 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9622 case HFI1_IB_CFG_PMA_TICKS:
9623 default:
9624unimplemented:
9625 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9626 dd_dev_info(
9627 dd,
9628 "%s: which %s: not implemented\n",
9629 __func__,
9630 ib_cfg_name(which));
9631 break;
9632 }
9633
9634 return val;
9635}
9636
9637/*
9638 * The largest MAD packet size.
9639 */
9640#define MAX_MAD_PACKET 2048
9641
9642/*
9643 * Return the maximum header bytes that can go on the _wire_
9644 * for this device. This count includes the ICRC which is
9645 * not part of the packet held in memory but is appended
9646 * by the HW.
9647 * This is dependent on the device's receive header entry size.
9648 * HFI allows this to be set per-receive context, but the
9649 * driver presently enforces a global value.
9650 */
9651u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9652{
9653 /*
9654 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9655 * the Receive Header Entry Size minus the PBC (or RHF) size
9656 * plus one DW for the ICRC appended by HW.
9657 *
9658 * dd->rcd[0].rcvhdrqentsize is in DW.
9659 * We use rcd[0] as all contexts will have the same value. Also,
9660 * the first kernel context would have been allocated by now so
9661 * we are guaranteed a valid value.
9662 */
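	/*
	 * Worked example (illustrative entry size): a 32 DW receive header
	 * entry gives (32 - 2 + 1) * 4 = 124 bytes of wire header.
	 */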
9663 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9664}
9665
9666/*
9667 * Set Send Length
9668 * @ppd - per port data
9669 *
9670 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9671 * registers compare against LRH.PktLen, so use the max bytes included
9672 * in the LRH.
9673 *
9674 * This routine changes all VL values except VL15, which it maintains at
9675 * the same value.
9676 */
9677static void set_send_length(struct hfi1_pportdata *ppd)
9678{
9679 struct hfi1_devdata *dd = ppd->dd;
9680	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9681	u32 maxvlmtu = dd->vld[15].mtu;
9682	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9683 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9684 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9685 int i;
9686
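	/*
	 * Register layout, as used below: the DW limits for VLs 0-3 are
	 * packed into SEND_LEN_CHECK0 and those for VLs 4-7 plus VL15 into
	 * SEND_LEN_CHECK1; each field holds (MTU + max header bytes) >> 2.
	 */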
9687 for (i = 0; i < ppd->vls_supported; i++) {
9688 if (dd->vld[i].mtu > maxvlmtu)
9689 maxvlmtu = dd->vld[i].mtu;
9690 if (i <= 3)
9691 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9692 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9693 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9694 else
9695 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9696 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9697 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9698 }
9699 write_csr(dd, SEND_LEN_CHECK0, len1);
9700 write_csr(dd, SEND_LEN_CHECK1, len2);
9701 /* adjust kernel credit return thresholds based on new MTUs */
9702 /* all kernel receive contexts have the same hdrqentsize */
9703 for (i = 0; i < ppd->vls_supported; i++) {
9704 sc_set_cr_threshold(dd->vld[i].sc,
9705 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9706 dd->rcd[0]->rcvhdrqentsize));
9707 }
9708 sc_set_cr_threshold(dd->vld[15].sc,
9709 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9710 dd->rcd[0]->rcvhdrqentsize));
9711
9712 /* Adjust maximum MTU for the port in DC */
9713 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9714 (ilog2(maxvlmtu >> 8) + 1);
9715 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9716 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9717 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9718 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9719 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9720}
9721
9722static void set_lidlmc(struct hfi1_pportdata *ppd)
9723{
9724 int i;
9725 u64 sreg = 0;
9726 struct hfi1_devdata *dd = ppd->dd;
9727 u32 mask = ~((1U << ppd->lmc) - 1);
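	/*
	 * Example: lmc == 0 gives mask 0xFFFFFFFF (exact LID match), while
	 * lmc == 2 gives mask 0xFFFFFFFC, so the low two LID bits are
	 * ignored by the DLID/SLID checks programmed below.
	 */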
9728 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9729
9730 if (dd->hfi1_snoop.mode_flag)
9731 dd_dev_info(dd, "Set lid/lmc while snooping");
9732
9733 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9734 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9735 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9736	     << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9737	     ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9738 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9739 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9740
9741 /*
9742 * Iterate over all the send contexts and set their SLID check
9743 */
9744 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9745 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9746 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9747 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9748
9749 for (i = 0; i < dd->chip_send_contexts; i++) {
9750 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9751 i, (u32)sreg);
9752 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9753 }
9754
9755 /* Now we have to do the same thing for the sdma engines */
9756 sdma_update_lmc(dd, mask, ppd->lid);
9757}
9758
9759static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9760{
9761 unsigned long timeout;
9762 u32 curr_state;
9763
9764 timeout = jiffies + msecs_to_jiffies(msecs);
9765 while (1) {
9766 curr_state = read_physical_state(dd);
9767 if (curr_state == state)
9768 break;
9769 if (time_after(jiffies, timeout)) {
9770 dd_dev_err(dd,
9771 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9772 state, curr_state);
9773 return -ETIMEDOUT;
9774 }
9775 usleep_range(1950, 2050); /* sleep 2ms-ish */
9776 }
9777
9778 return 0;
9779}
9780
9781/*
9782 * Helper for set_link_state(). Do not call except from that routine.
9783 * Expects ppd->hls_mutex to be held.
9784 *
9785 * @rem_reason value to be sent to the neighbor
9786 *
9787 * LinkDownReasons only set if transition succeeds.
9788 */
9789static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9790{
9791 struct hfi1_devdata *dd = ppd->dd;
9792 u32 pstate, previous_state;
9793 u32 last_local_state;
9794 u32 last_remote_state;
9795 int ret;
9796 int do_transition;
9797 int do_wait;
9798
9799 previous_state = ppd->host_link_state;
9800 ppd->host_link_state = HLS_GOING_OFFLINE;
9801 pstate = read_physical_state(dd);
9802 if (pstate == PLS_OFFLINE) {
9803 do_transition = 0; /* in right state */
9804 do_wait = 0; /* ...no need to wait */
9805 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9806 do_transition = 0; /* in an offline transient state */
9807 do_wait = 1; /* ...wait for it to settle */
9808 } else {
9809 do_transition = 1; /* need to move to offline */
9810 do_wait = 1; /* ...will need to wait */
9811 }
9812
9813 if (do_transition) {
9814 ret = set_physical_link_state(dd,
9815 PLS_OFFLINE | (rem_reason << 8));
9816
9817 if (ret != HCMD_SUCCESS) {
9818 dd_dev_err(dd,
9819 "Failed to transition to Offline link state, return %d\n",
9820 ret);
9821 return -EINVAL;
9822 }
9823		if (ppd->offline_disabled_reason ==
9824		    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9825			ppd->offline_disabled_reason =
9826			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9827	}
9828
9829 if (do_wait) {
9830 /* it can take a while for the link to go down */
9831		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9832		if (ret < 0)
9833 return ret;
9834 }
9835
9836 /* make sure the logical state is also down */
9837 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9838
9839 /*
9840 * Now in charge of LCB - must be after the physical state is
9841 * offline.quiet and before host_link_state is changed.
9842 */
9843 set_host_lcb_access(dd);
9844 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9845 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9846
9847	if (ppd->port_type == PORT_TYPE_QSFP &&
9848 ppd->qsfp_info.limiting_active &&
9849 qsfp_mod_present(ppd)) {
9850 set_qsfp_tx(ppd, 0);
9851 }
9852
9853	/*
9854 * The LNI has a mandatory wait time after the physical state
9855 * moves to Offline.Quiet. The wait time may be different
9856 * depending on how the link went down. The 8051 firmware
9857 * will observe the needed wait time and only move to ready
9858 * when that is completed. The largest of the quiet timeouts
9859	 * is 6s, so wait that long and then at least 0.5s more for
9860	 * other transitions, and another 0.5s for a buffer.
9861	 */
9862	ret = wait_fm_ready(dd, 7000);
9863	if (ret) {
9864 dd_dev_err(dd,
9865 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9866 /* state is really offline, so make it so */
9867 ppd->host_link_state = HLS_DN_OFFLINE;
9868 return ret;
9869 }
9870
9871 /*
9872 * The state is now offline and the 8051 is ready to accept host
9873 * requests.
9874 * - change our state
9875 * - notify others if we were previously in a linkup state
9876 */
9877 ppd->host_link_state = HLS_DN_OFFLINE;
9878 if (previous_state & HLS_UP) {
9879 /* went down while link was up */
9880 handle_linkup_change(dd, 0);
9881 } else if (previous_state
9882 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9883 /* went down while attempting link up */
9884 /* byte 1 of last_*_state is the failure reason */
9885 read_last_local_state(dd, &last_local_state);
9886 read_last_remote_state(dd, &last_remote_state);
9887 dd_dev_err(dd,
9888 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9889 last_local_state, last_remote_state);
9890 }
9891
9892 /* the active link width (downgrade) is 0 on link down */
9893 ppd->link_width_active = 0;
9894 ppd->link_width_downgrade_tx_active = 0;
9895 ppd->link_width_downgrade_rx_active = 0;
9896 ppd->current_egress_rate = 0;
9897 return 0;
9898}
9899
9900/* return the link state name */
9901static const char *link_state_name(u32 state)
9902{
9903 const char *name;
9904 int n = ilog2(state);
9905 static const char * const names[] = {
9906 [__HLS_UP_INIT_BP] = "INIT",
9907 [__HLS_UP_ARMED_BP] = "ARMED",
9908 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9909 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9910 [__HLS_DN_POLL_BP] = "POLL",
9911 [__HLS_DN_DISABLE_BP] = "DISABLE",
9912 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9913 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9914 [__HLS_GOING_UP_BP] = "GOING_UP",
9915 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9916 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9917 };
9918
9919 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9920 return name ? name : "unknown";
9921}
9922
9923/* return the link state reason name */
9924static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9925{
9926 if (state == HLS_UP_INIT) {
9927 switch (ppd->linkinit_reason) {
9928 case OPA_LINKINIT_REASON_LINKUP:
9929 return "(LINKUP)";
9930 case OPA_LINKINIT_REASON_FLAPPING:
9931 return "(FLAPPING)";
9932 case OPA_LINKINIT_OUTSIDE_POLICY:
9933 return "(OUTSIDE_POLICY)";
9934 case OPA_LINKINIT_QUARANTINED:
9935 return "(QUARANTINED)";
9936 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9937 return "(INSUFIC_CAPABILITY)";
9938 default:
9939 break;
9940 }
9941 }
9942 return "";
9943}
9944
9945/*
9946 * driver_physical_state - convert the driver's notion of a port's
9947 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9948 * Return -1 (converted to a u32) to indicate error.
9949 */
9950u32 driver_physical_state(struct hfi1_pportdata *ppd)
9951{
9952 switch (ppd->host_link_state) {
9953 case HLS_UP_INIT:
9954 case HLS_UP_ARMED:
9955 case HLS_UP_ACTIVE:
9956 return IB_PORTPHYSSTATE_LINKUP;
9957 case HLS_DN_POLL:
9958 return IB_PORTPHYSSTATE_POLLING;
9959 case HLS_DN_DISABLE:
9960 return IB_PORTPHYSSTATE_DISABLED;
9961 case HLS_DN_OFFLINE:
9962 return OPA_PORTPHYSSTATE_OFFLINE;
9963 case HLS_VERIFY_CAP:
9964 return IB_PORTPHYSSTATE_POLLING;
9965 case HLS_GOING_UP:
9966 return IB_PORTPHYSSTATE_POLLING;
9967 case HLS_GOING_OFFLINE:
9968 return OPA_PORTPHYSSTATE_OFFLINE;
9969 case HLS_LINK_COOLDOWN:
9970 return OPA_PORTPHYSSTATE_OFFLINE;
9971 case HLS_DN_DOWNDEF:
9972 default:
9973 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9974 ppd->host_link_state);
9975 return -1;
9976 }
9977}
9978
9979/*
9980 * driver_logical_state - convert the driver's notion of a port's
9981 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9982 * (converted to a u32) to indicate error.
9983 */
9984u32 driver_logical_state(struct hfi1_pportdata *ppd)
9985{
9986 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9987 return IB_PORT_DOWN;
9988
9989 switch (ppd->host_link_state & HLS_UP) {
9990 case HLS_UP_INIT:
9991 return IB_PORT_INIT;
9992 case HLS_UP_ARMED:
9993 return IB_PORT_ARMED;
9994 case HLS_UP_ACTIVE:
9995 return IB_PORT_ACTIVE;
9996 default:
9997 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9998 ppd->host_link_state);
9999 return -1;
10000 }
10001}
10002
10003void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10004 u8 neigh_reason, u8 rem_reason)
10005{
10006 if (ppd->local_link_down_reason.latest == 0 &&
10007 ppd->neigh_link_down_reason.latest == 0) {
10008 ppd->local_link_down_reason.latest = lcl_reason;
10009 ppd->neigh_link_down_reason.latest = neigh_reason;
10010 ppd->remote_link_down_reason = rem_reason;
10011 }
10012}
10013
10014/*
10015 * Change the physical and/or logical link state.
10016 *
10017 * Do not call this routine while inside an interrupt. It contains
10018 * calls to routines that can take multiple seconds to finish.
10019 *
10020 * Returns 0 on success, -errno on failure.
10021 */
10022int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10023{
10024 struct hfi1_devdata *dd = ppd->dd;
10025 struct ib_event event = {.device = NULL};
10026 int ret1, ret = 0;
10027 int was_up, is_down;
10028 int orig_new_state, poll_bounce;
10029
10030 mutex_lock(&ppd->hls_lock);
10031
10032 orig_new_state = state;
10033 if (state == HLS_DN_DOWNDEF)
10034 state = dd->link_default;
10035
10036 /* interpret poll -> poll as a link bounce */
10037	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10038		      state == HLS_DN_POLL;
10039
10040 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10041 link_state_name(ppd->host_link_state),
10042 link_state_name(orig_new_state),
10043 poll_bounce ? "(bounce) " : "",
10044 link_state_reason_name(ppd, state));
10045
10046 was_up = !!(ppd->host_link_state & HLS_UP);
10047
10048 /*
10049 * If we're going to a (HLS_*) link state that implies the logical
10050 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10051 * reset is_sm_config_started to 0.
10052 */
10053 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10054 ppd->is_sm_config_started = 0;
10055
10056 /*
10057	 * Do nothing if the states match. Let a poll-to-poll link bounce
10058 * go through.
10059 */
10060 if (ppd->host_link_state == state && !poll_bounce)
10061 goto done;
10062
10063 switch (state) {
10064 case HLS_UP_INIT:
10065		if (ppd->host_link_state == HLS_DN_POLL &&
10066		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10067			/*
10068 * Quick link up jumps from polling to here.
10069 *
10070 * Whether in normal or loopback mode, the
10071 * simulator jumps from polling to link up.
10072 * Accept that here.
10073 */
10074 /* OK */;
10075 } else if (ppd->host_link_state != HLS_GOING_UP) {
10076 goto unexpected;
10077 }
10078
10079 ppd->host_link_state = HLS_UP_INIT;
10080 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10081 if (ret) {
10082 /* logical state didn't change, stay at going_up */
10083 ppd->host_link_state = HLS_GOING_UP;
10084 dd_dev_err(dd,
10085 "%s: logical state did not change to INIT\n",
10086 __func__);
10087 } else {
10088 /* clear old transient LINKINIT_REASON code */
10089 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10090 ppd->linkinit_reason =
10091 OPA_LINKINIT_REASON_LINKUP;
10092
10093 /* enable the port */
10094 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10095
10096 handle_linkup_change(dd, 1);
10097 }
10098 break;
10099 case HLS_UP_ARMED:
10100 if (ppd->host_link_state != HLS_UP_INIT)
10101 goto unexpected;
10102
10103 ppd->host_link_state = HLS_UP_ARMED;
10104 set_logical_state(dd, LSTATE_ARMED);
10105 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10106 if (ret) {
10107 /* logical state didn't change, stay at init */
10108 ppd->host_link_state = HLS_UP_INIT;
10109 dd_dev_err(dd,
10110 "%s: logical state did not change to ARMED\n",
10111 __func__);
10112 }
10113 /*
10114 * The simulator does not currently implement SMA messages,
10115 * so neighbor_normal is not set. Set it here when we first
10116 * move to Armed.
10117 */
10118 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10119 ppd->neighbor_normal = 1;
10120 break;
10121 case HLS_UP_ACTIVE:
10122 if (ppd->host_link_state != HLS_UP_ARMED)
10123 goto unexpected;
10124
10125 ppd->host_link_state = HLS_UP_ACTIVE;
10126 set_logical_state(dd, LSTATE_ACTIVE);
10127 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10128 if (ret) {
10129 /* logical state didn't change, stay at armed */
10130 ppd->host_link_state = HLS_UP_ARMED;
10131 dd_dev_err(dd,
10132 "%s: logical state did not change to ACTIVE\n",
10133 __func__);
10134 } else {
10135			/* tell all engines to go running */
10136 sdma_all_running(dd);
10137
10138			/* Signal the IB layer that the port has gone active */
10139			event.device = &dd->verbs_dev.rdi.ibdev;
10140			event.element.port_num = ppd->port;
10141 event.event = IB_EVENT_PORT_ACTIVE;
10142 }
10143 break;
10144 case HLS_DN_POLL:
10145 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10146 ppd->host_link_state == HLS_DN_OFFLINE) &&
10147 dd->dc_shutdown)
10148 dc_start(dd);
10149 /* Hand LED control to the DC */
10150 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10151
10152 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10153 u8 tmp = ppd->link_enabled;
10154
10155 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10156 if (ret) {
10157 ppd->link_enabled = tmp;
10158 break;
10159 }
10160 ppd->remote_link_down_reason = 0;
10161
10162 if (ppd->driver_link_ready)
10163 ppd->link_enabled = 1;
10164 }
10165
10166		set_all_slowpath(ppd->dd);
10167		ret = set_local_link_attributes(ppd);
10168 if (ret)
10169 break;
10170
10171 ppd->port_error_action = 0;
10172 ppd->host_link_state = HLS_DN_POLL;
10173
10174 if (quick_linkup) {
10175 /* quick linkup does not go into polling */
10176 ret = do_quick_linkup(dd);
10177 } else {
10178 ret1 = set_physical_link_state(dd, PLS_POLLING);
10179 if (ret1 != HCMD_SUCCESS) {
10180 dd_dev_err(dd,
10181 "Failed to transition to Polling link state, return 0x%x\n",
10182 ret1);
10183 ret = -EINVAL;
10184 }
10185 }
10186		ppd->offline_disabled_reason =
10187			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10188		/*
10189 * If an error occurred above, go back to offline. The
10190 * caller may reschedule another attempt.
10191 */
10192 if (ret)
10193 goto_offline(ppd, 0);
10194 break;
10195 case HLS_DN_DISABLE:
10196 /* link is disabled */
10197 ppd->link_enabled = 0;
10198
10199 /* allow any state to transition to disabled */
10200
10201 /* must transition to offline first */
10202 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10203 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10204 if (ret)
10205 break;
10206 ppd->remote_link_down_reason = 0;
10207 }
10208
10209 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10210 if (ret1 != HCMD_SUCCESS) {
10211 dd_dev_err(dd,
10212 "Failed to transition to Disabled link state, return 0x%x\n",
10213 ret1);
10214 ret = -EINVAL;
10215 break;
10216 }
10217 ppd->host_link_state = HLS_DN_DISABLE;
10218 dc_shutdown(dd);
10219 break;
10220 case HLS_DN_OFFLINE:
10221 if (ppd->host_link_state == HLS_DN_DISABLE)
10222 dc_start(dd);
10223
10224 /* allow any state to transition to offline */
10225 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10226 if (!ret)
10227 ppd->remote_link_down_reason = 0;
10228 break;
10229 case HLS_VERIFY_CAP:
10230 if (ppd->host_link_state != HLS_DN_POLL)
10231 goto unexpected;
10232 ppd->host_link_state = HLS_VERIFY_CAP;
10233 break;
10234 case HLS_GOING_UP:
10235 if (ppd->host_link_state != HLS_VERIFY_CAP)
10236 goto unexpected;
10237
10238 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10239 if (ret1 != HCMD_SUCCESS) {
10240 dd_dev_err(dd,
10241 "Failed to transition to link up state, return 0x%x\n",
10242 ret1);
10243 ret = -EINVAL;
10244 break;
10245 }
10246 ppd->host_link_state = HLS_GOING_UP;
10247 break;
10248
10249 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10250 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10251 default:
10252 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10253 __func__, state);
10254 ret = -EINVAL;
10255 break;
10256 }
10257
10258 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10259 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10260
10261 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10262 ppd->neigh_link_down_reason.sma == 0) {
10263 ppd->local_link_down_reason.sma =
10264 ppd->local_link_down_reason.latest;
10265 ppd->neigh_link_down_reason.sma =
10266 ppd->neigh_link_down_reason.latest;
10267 }
10268
10269 goto done;
10270
10271unexpected:
10272 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10273 __func__, link_state_name(ppd->host_link_state),
10274 link_state_name(state));
10275 ret = -EINVAL;
10276
10277done:
10278 mutex_unlock(&ppd->hls_lock);
10279
10280 if (event.device)
10281 ib_dispatch_event(&event);
10282
10283 return ret;
10284}
10285
10286int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10287{
10288 u64 reg;
10289 int ret = 0;
10290
10291 switch (which) {
10292 case HFI1_IB_CFG_LIDLMC:
10293 set_lidlmc(ppd);
10294 break;
10295 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10296 /*
10297 * The VL Arbitrator high limit is sent in units of 4k
10298 * bytes, while HFI stores it in units of 64 bytes.
10299 */
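		/*
		 * Worked example: a limit of 2 (2 * 4 KB = 8 KB) becomes
		 * 2 * (4096 / 64) = 128 units of 64 bytes.
		 */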
10300		val *= 4096 / 64;
10301		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10302 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10303 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10304 break;
10305 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10306 /* HFI only supports POLL as the default link down state */
10307 if (val != HLS_DN_POLL)
10308 ret = -EINVAL;
10309 break;
10310 case HFI1_IB_CFG_OP_VLS:
10311 if (ppd->vls_operational != val) {
10312 ppd->vls_operational = val;
10313 if (!ppd->port)
10314 ret = -EINVAL;
10315		}
10316 break;
10317 /*
10318 * For link width, link width downgrade, and speed enable, always AND
10319 * the setting with what is actually supported. This has two benefits.
10320 * First, enabled can't have unsupported values, no matter what the
10321 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10322 * "fill in with your supported value" have all the bits in the
10323 * field set, so simply ANDing with supported has the desired result.
10324 */
10325 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10326 ppd->link_width_enabled = val & ppd->link_width_supported;
10327 break;
10328 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10329 ppd->link_width_downgrade_enabled =
10330 val & ppd->link_width_downgrade_supported;
10331 break;
10332 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10333 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10334 break;
10335 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10336 /*
10337 * HFI does not follow IB specs, save this value
10338 * so we can report it, if asked.
10339 */
10340 ppd->overrun_threshold = val;
10341 break;
10342 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10343 /*
10344 * HFI does not follow IB specs, save this value
10345 * so we can report it, if asked.
10346 */
10347 ppd->phy_error_threshold = val;
10348 break;
10349
10350 case HFI1_IB_CFG_MTU:
10351 set_send_length(ppd);
10352 break;
10353
10354 case HFI1_IB_CFG_PKEYS:
10355 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10356 set_partition_keys(ppd);
10357 break;
10358
10359 default:
10360 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10361 dd_dev_info(ppd->dd,
10362 "%s: which %s, val 0x%x: not implemented\n",
10363 __func__, ib_cfg_name(which), val);
10364 break;
10365 }
10366 return ret;
10367}
10368
10369/* begin functions related to vl arbitration table caching */
10370static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10371{
10372 int i;
10373
10374 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10375 VL_ARB_LOW_PRIO_TABLE_SIZE);
10376 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10377 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10378
10379 /*
10380 * Note that we always return values directly from the
10381 * 'vl_arb_cache' (and do no CSR reads) in response to a
10382 * 'Get(VLArbTable)'. This is obviously correct after a
10383 * 'Set(VLArbTable)', since the cache will then be up to
10384 * date. But it's also correct prior to any 'Set(VLArbTable)'
10385 * since then both the cache, and the relevant h/w registers
10386 * will be zeroed.
10387 */
10388
10389 for (i = 0; i < MAX_PRIO_TABLE; i++)
10390 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10391}
10392
10393/*
10394 * vl_arb_lock_cache
10395 *
10396 * All other vl_arb_* functions should be called only after locking
10397 * the cache.
10398 */
10399static inline struct vl_arb_cache *
10400vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10401{
10402 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10403 return NULL;
10404 spin_lock(&ppd->vl_arb_cache[idx].lock);
10405 return &ppd->vl_arb_cache[idx];
10406}
10407
10408static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10409{
10410 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10411}
10412
10413static void vl_arb_get_cache(struct vl_arb_cache *cache,
10414 struct ib_vl_weight_elem *vl)
10415{
10416 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10417}
10418
10419static void vl_arb_set_cache(struct vl_arb_cache *cache,
10420 struct ib_vl_weight_elem *vl)
10421{
10422 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10423}
10424
10425static int vl_arb_match_cache(struct vl_arb_cache *cache,
10426 struct ib_vl_weight_elem *vl)
10427{
10428 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10429}
10430
10431/* end functions related to vl arbitration table caching */
10432
10433static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10434 u32 size, struct ib_vl_weight_elem *vl)
10435{
10436 struct hfi1_devdata *dd = ppd->dd;
10437 u64 reg;
10438 unsigned int i, is_up = 0;
10439 int drain, ret = 0;
10440
10441 mutex_lock(&ppd->hls_lock);
10442
10443 if (ppd->host_link_state & HLS_UP)
10444 is_up = 1;
10445
10446 drain = !is_ax(dd) && is_up;
10447
10448 if (drain)
10449 /*
10450 * Before adjusting VL arbitration weights, empty per-VL
10451 * FIFOs, otherwise a packet whose VL weight is being
10452 * set to 0 could get stuck in a FIFO with no chance to
10453 * egress.
10454 */
10455 ret = stop_drain_data_vls(dd);
10456
10457 if (ret) {
10458 dd_dev_err(
10459 dd,
10460 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10461 __func__);
10462 goto err;
10463 }
10464
10465 for (i = 0; i < size; i++, vl++) {
10466 /*
10467 * NOTE: The low priority shift and mask are used here, but
10468 * they are the same for both the low and high registers.
10469 */
10470 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10471 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10472 | (((u64)vl->weight
10473 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10474 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10475 write_csr(dd, target + (i * 8), reg);
10476 }
10477 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10478
10479 if (drain)
10480 open_fill_data_vls(dd); /* reopen all VLs */
10481
10482err:
10483 mutex_unlock(&ppd->hls_lock);
10484
10485 return ret;
10486}
10487
10488/*
10489 * Read one credit merge VL register.
10490 */
10491static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10492 struct vl_limit *vll)
10493{
10494 u64 reg = read_csr(dd, csr);
10495
10496 vll->dedicated = cpu_to_be16(
10497 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10498 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10499 vll->shared = cpu_to_be16(
10500 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10501 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10502}
10503
10504/*
10505 * Read the current credit merge limits.
10506 */
10507static int get_buffer_control(struct hfi1_devdata *dd,
10508 struct buffer_control *bc, u16 *overall_limit)
10509{
10510 u64 reg;
10511 int i;
10512
10513 /* not all entries are filled in */
10514 memset(bc, 0, sizeof(*bc));
10515
10516 /* OPA and HFI have a 1-1 mapping */
10517 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10518		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10519
10520 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10521 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10522
10523 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10524 bc->overall_shared_limit = cpu_to_be16(
10525 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10526 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10527 if (overall_limit)
10528 *overall_limit = (reg
10529 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10530 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10531 return sizeof(struct buffer_control);
10532}
10533
10534static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10535{
10536 u64 reg;
10537 int i;
10538
10539 /* each register contains 16 SC->VLnt mappings, 4 bits each */
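	/*
	 * Illustrative decode: a byte of 0x31 yields VLnt 1 for the
	 * even-numbered SC of the pair and VLnt 3 for the odd-numbered SC.
	 */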
10540 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10541 for (i = 0; i < sizeof(u64); i++) {
10542 u8 byte = *(((u8 *)&reg) + i);
10543
10544 dp->vlnt[2 * i] = byte & 0xf;
10545 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10546 }
10547
10548 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10549 for (i = 0; i < sizeof(u64); i++) {
10550 u8 byte = *(((u8 *)&reg) + i);
10551
10552 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10553 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10554 }
10555 return sizeof(struct sc2vlnt);
10556}
10557
10558static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10559 struct ib_vl_weight_elem *vl)
10560{
10561 unsigned int i;
10562
10563 for (i = 0; i < nelems; i++, vl++) {
10564 vl->vl = 0xf;
10565 vl->weight = 0;
10566 }
10567}
10568
10569static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10570{
10571 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10572 DC_SC_VL_VAL(15_0,
10573 0, dp->vlnt[0] & 0xf,
10574 1, dp->vlnt[1] & 0xf,
10575 2, dp->vlnt[2] & 0xf,
10576 3, dp->vlnt[3] & 0xf,
10577 4, dp->vlnt[4] & 0xf,
10578 5, dp->vlnt[5] & 0xf,
10579 6, dp->vlnt[6] & 0xf,
10580 7, dp->vlnt[7] & 0xf,
10581 8, dp->vlnt[8] & 0xf,
10582 9, dp->vlnt[9] & 0xf,
10583 10, dp->vlnt[10] & 0xf,
10584 11, dp->vlnt[11] & 0xf,
10585 12, dp->vlnt[12] & 0xf,
10586 13, dp->vlnt[13] & 0xf,
10587 14, dp->vlnt[14] & 0xf,
10588 15, dp->vlnt[15] & 0xf));
10589 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10590 DC_SC_VL_VAL(31_16,
10591 16, dp->vlnt[16] & 0xf,
10592 17, dp->vlnt[17] & 0xf,
10593 18, dp->vlnt[18] & 0xf,
10594 19, dp->vlnt[19] & 0xf,
10595 20, dp->vlnt[20] & 0xf,
10596 21, dp->vlnt[21] & 0xf,
10597 22, dp->vlnt[22] & 0xf,
10598 23, dp->vlnt[23] & 0xf,
10599 24, dp->vlnt[24] & 0xf,
10600 25, dp->vlnt[25] & 0xf,
10601 26, dp->vlnt[26] & 0xf,
10602 27, dp->vlnt[27] & 0xf,
10603 28, dp->vlnt[28] & 0xf,
10604 29, dp->vlnt[29] & 0xf,
10605 30, dp->vlnt[30] & 0xf,
10606 31, dp->vlnt[31] & 0xf));
10607}
10608
10609static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10610 u16 limit)
10611{
10612 if (limit != 0)
10613 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10614 what, (int)limit, idx);
10615}
10616
10617/* change only the shared limit portion of SendCmGlobalCredit */
10618static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10619{
10620 u64 reg;
10621
10622 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10623 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10624 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10625 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10626}
10627
10628/* change only the total credit limit portion of SendCmGlobalCredit */
10629static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10630{
10631 u64 reg;
10632
10633 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10634 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10635 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10636 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10637}
10638
10639/* set the given per-VL shared limit */
10640static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10641{
10642 u64 reg;
10643 u32 addr;
10644
10645 if (vl < TXE_NUM_DATA_VL)
10646 addr = SEND_CM_CREDIT_VL + (8 * vl);
10647 else
10648 addr = SEND_CM_CREDIT_VL15;
10649
10650 reg = read_csr(dd, addr);
10651 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10652 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10653 write_csr(dd, addr, reg);
10654}
10655
10656/* set the given per-VL dedicated limit */
10657static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10658{
10659 u64 reg;
10660 u32 addr;
10661
10662 if (vl < TXE_NUM_DATA_VL)
10663 addr = SEND_CM_CREDIT_VL + (8 * vl);
10664 else
10665 addr = SEND_CM_CREDIT_VL15;
10666
10667 reg = read_csr(dd, addr);
10668 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10669 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10670 write_csr(dd, addr, reg);
10671}
10672
10673/* spin until the given per-VL status mask bits clear */
10674static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10675 const char *which)
10676{
10677 unsigned long timeout;
10678 u64 reg;
10679
10680 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10681 while (1) {
10682 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10683
10684 if (reg == 0)
10685 return; /* success */
10686 if (time_after(jiffies, timeout))
10687 break; /* timed out */
10688 udelay(1);
10689 }
10690
10691 dd_dev_err(dd,
10692 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10693 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10694 /*
10695 * If this occurs, it is likely there was a credit loss on the link.
10696 * The only recovery from that is a link bounce.
10697 */
10698 dd_dev_err(dd,
10699 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10700}
10701
10702/*
10703 * The number of credits on the VLs may be changed while everything
10704 * is "live", but the following algorithm must be followed due to
10705 * how the hardware is actually implemented. In particular,
10706 * Return_Credit_Status[] is the only correct status check.
10707 *
10708 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10709 * set Global_Shared_Credit_Limit = 0
10710 * use_all_vl = 1
10711 * mask0 = all VLs that are changing either dedicated or shared limits
10712 * set Shared_Limit[mask0] = 0
10713 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10714 * if (changing any dedicated limit)
10715 * mask1 = all VLs that are lowering dedicated limits
10716 * lower Dedicated_Limit[mask1]
10717 * spin until Return_Credit_Status[mask1] == 0
10718 * raise Dedicated_Limits
10719 * raise Shared_Limits
10720 * raise Global_Shared_Credit_Limit
10721 *
10722 * lower = if the new limit is lower, set the limit to the new value
10723 * raise = if the new limit is higher than the current value (may be changed
10724 * earlier in the algorithm), set the new limit to the new value
10725 */
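/*
 * Illustrative walk-through (example values only): to lower just VL0's shared
 * limit, the code zeroes the global shared limit and Shared_Limit[VL0], spins
 * until the matching Return_Credit_Status bits clear, then "raises"
 * Shared_Limit[VL0] and the global shared limit to their new values (which
 * are higher than the temporary 0), in that order.
 */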
10726int set_buffer_control(struct hfi1_pportdata *ppd,
10727		       struct buffer_control *new_bc)
10728{
10729	struct hfi1_devdata *dd = ppd->dd;
10730	u64 changing_mask, ld_mask, stat_mask;
10731	int change_count;
10732	int i, use_all_mask;
10733	int this_shared_changing;
10734	int vl_count = 0, ret;
10735	/*
10736 * A0: add the variable any_shared_limit_changing below and in the
10737 * algorithm above. If removing A0 support, it can be removed.
10738 */
10739 int any_shared_limit_changing;
10740 struct buffer_control cur_bc;
10741 u8 changing[OPA_MAX_VLS];
10742 u8 lowering_dedicated[OPA_MAX_VLS];
10743 u16 cur_total;
10744 u32 new_total = 0;
10745 const u64 all_mask =
10746 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10747 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10748 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10749 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10750 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10751 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10752 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10753 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10754 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10755
10756#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10757#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10758
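	/*
	 * Only data VLs 0..TXE_NUM_DATA_VL-1 and VL15 pass valid_vl();
	 * entries for the remaining OPA VL indices are warned about and
	 * zeroed below.
	 */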
10759	/* find the new total credits, do sanity check on unused VLs */
10760 for (i = 0; i < OPA_MAX_VLS; i++) {
10761 if (valid_vl(i)) {
10762 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10763 continue;
10764 }
10765 nonzero_msg(dd, i, "dedicated",
10766 be16_to_cpu(new_bc->vl[i].dedicated));
10767 nonzero_msg(dd, i, "shared",
10768 be16_to_cpu(new_bc->vl[i].shared));
10769 new_bc->vl[i].dedicated = 0;
10770 new_bc->vl[i].shared = 0;
10771 }
10772 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10773
10774	/* fetch the current values */
10775 get_buffer_control(dd, &cur_bc, &cur_total);
10776
10777 /*
10778 * Create the masks we will use.
10779 */
10780 memset(changing, 0, sizeof(changing));
10781 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10782	/*
10783 * NOTE: Assumes that the individual VL bits are adjacent and in
10784 * increasing order
10785 */
10786	stat_mask =
10787 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10788 changing_mask = 0;
10789 ld_mask = 0;
10790 change_count = 0;
10791 any_shared_limit_changing = 0;
10792 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10793 if (!valid_vl(i))
10794 continue;
10795 this_shared_changing = new_bc->vl[i].shared
10796 != cur_bc.vl[i].shared;
10797 if (this_shared_changing)
10798 any_shared_limit_changing = 1;
10799		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10800		    this_shared_changing) {
10801			changing[i] = 1;
10802 changing_mask |= stat_mask;
10803 change_count++;
10804 }
10805 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10806 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10807 lowering_dedicated[i] = 1;
10808 ld_mask |= stat_mask;
10809 }
10810 }
10811
10812 /* bracket the credit change with a total adjustment */
10813 if (new_total > cur_total)
10814 set_global_limit(dd, new_total);
10815
10816 /*
10817 * Start the credit change algorithm.
10818 */
10819 use_all_mask = 0;
10820 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10821	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
10822	    (is_ax(dd) && any_shared_limit_changing)) {
10823		set_global_shared(dd, 0);
10824 cur_bc.overall_shared_limit = 0;
10825 use_all_mask = 1;
10826 }
10827
10828 for (i = 0; i < NUM_USABLE_VLS; i++) {
10829 if (!valid_vl(i))
10830 continue;
10831
10832 if (changing[i]) {
10833 set_vl_shared(dd, i, 0);
10834 cur_bc.vl[i].shared = 0;
10835 }
10836 }
10837
10838 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10839 "shared");
10840
10841 if (change_count > 0) {
10842 for (i = 0; i < NUM_USABLE_VLS; i++) {
10843 if (!valid_vl(i))
10844 continue;
10845
10846 if (lowering_dedicated[i]) {
10847 set_vl_dedicated(dd, i,
10848 be16_to_cpu(new_bc->vl[i].dedicated));
10849 cur_bc.vl[i].dedicated =
10850 new_bc->vl[i].dedicated;
10851 }
10852 }
10853
10854 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10855
10856 /* now raise all dedicated that are going up */
10857 for (i = 0; i < NUM_USABLE_VLS; i++) {
10858 if (!valid_vl(i))
10859 continue;
10860
10861 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10862 be16_to_cpu(cur_bc.vl[i].dedicated))
10863 set_vl_dedicated(dd, i,
10864 be16_to_cpu(new_bc->vl[i].dedicated));
10865 }
10866 }
10867
10868 /* next raise all shared that are going up */
10869 for (i = 0; i < NUM_USABLE_VLS; i++) {
10870 if (!valid_vl(i))
10871 continue;
10872
10873 if (be16_to_cpu(new_bc->vl[i].shared) >
10874 be16_to_cpu(cur_bc.vl[i].shared))
10875 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10876 }
10877
10878 /* finally raise the global shared */
10879 if (be16_to_cpu(new_bc->overall_shared_limit) >
10880 be16_to_cpu(cur_bc.overall_shared_limit))
10881 set_global_shared(dd,
10882 be16_to_cpu(new_bc->overall_shared_limit));
10883
10884 /* bracket the credit change with a total adjustment */
10885 if (new_total < cur_total)
10886 set_global_limit(dd, new_total);
10887
10888 /*
10889 * Determine the actual number of operational VLS using the number of
10890 * dedicated and shared credits for each VL.
10891 */
10892 if (change_count > 0) {
10893 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10894 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10895 be16_to_cpu(new_bc->vl[i].shared) > 0)
10896 vl_count++;
10897 ppd->actual_vls_operational = vl_count;
10898 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10899 ppd->actual_vls_operational :
10900 ppd->vls_operational,
10901 NULL);
10902 if (ret == 0)
10903 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10904 ppd->actual_vls_operational :
10905 ppd->vls_operational, NULL);
10906 if (ret)
10907 return ret;
10908 }
10909	return 0;
10910}
10911
10912/*
10913 * Read the given fabric manager table. Return the size of the
10914 * table (in bytes) on success, and a negative error code on
10915 * failure.
10916 */
10917int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10918
10919{
10920 int size;
10921 struct vl_arb_cache *vlc;
10922
10923 switch (which) {
10924 case FM_TBL_VL_HIGH_ARB:
10925 size = 256;
10926 /*
10927 * OPA specifies 128 elements (of 2 bytes each), though
10928 * HFI supports only 16 elements in h/w.
10929 */
10930 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10931 vl_arb_get_cache(vlc, t);
10932 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10933 break;
10934 case FM_TBL_VL_LOW_ARB:
10935 size = 256;
10936 /*
10937 * OPA specifies 128 elements (of 2 bytes each), though
10938 * HFI supports only 16 elements in h/w.
10939 */
10940 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10941 vl_arb_get_cache(vlc, t);
10942 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10943 break;
10944 case FM_TBL_BUFFER_CONTROL:
10945 size = get_buffer_control(ppd->dd, t, NULL);
10946 break;
10947 case FM_TBL_SC2VLNT:
10948 size = get_sc2vlnt(ppd->dd, t);
10949 break;
10950 case FM_TBL_VL_PREEMPT_ELEMS:
10951 size = 256;
10952 /* OPA specifies 128 elements, of 2 bytes each */
10953 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10954 break;
10955 case FM_TBL_VL_PREEMPT_MATRIX:
10956 size = 256;
10957 /*
10958 * OPA specifies that this is the same size as the VL
10959 * arbitration tables (i.e., 256 bytes).
10960 */
10961 break;
10962 default:
10963 return -EINVAL;
10964 }
10965 return size;
10966}
10967
10968/*
10969 * Write the given fabric manager table.
10970 */
10971int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10972{
10973 int ret = 0;
10974 struct vl_arb_cache *vlc;
10975
10976 switch (which) {
10977 case FM_TBL_VL_HIGH_ARB:
10978 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10979 if (vl_arb_match_cache(vlc, t)) {
10980 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10981 break;
10982 }
10983 vl_arb_set_cache(vlc, t);
10984 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10985 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10986 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10987 break;
10988 case FM_TBL_VL_LOW_ARB:
10989 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10990 if (vl_arb_match_cache(vlc, t)) {
10991 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10992 break;
10993 }
10994 vl_arb_set_cache(vlc, t);
10995 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10996 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10997 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10998 break;
10999 case FM_TBL_BUFFER_CONTROL:
11000		ret = set_buffer_control(ppd, t);
11001		break;
11002 case FM_TBL_SC2VLNT:
11003 set_sc2vlnt(ppd->dd, t);
11004 break;
11005 default:
11006 ret = -EINVAL;
11007 }
11008 return ret;
11009}
11010
11011/*
11012 * Disable all data VLs.
11013 *
11014 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11015 */
11016static int disable_data_vls(struct hfi1_devdata *dd)
11017{
11018	if (is_ax(dd))
11019		return 1;
11020
11021 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11022
11023 return 0;
11024}
11025
11026/*
11027 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11028 * Just re-enables all data VLs (the "fill" part happens
11029 * automatically - the name was chosen for symmetry with
11030 * stop_drain_data_vls()).
11031 *
11032 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11033 */
11034int open_fill_data_vls(struct hfi1_devdata *dd)
11035{
11036	if (is_ax(dd))
11037		return 1;
11038
11039 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11040
11041 return 0;
11042}
11043
11044/*
11045 * drain_data_vls() - assumes that disable_data_vls() has been called,
11046 * then waits for the occupancy of the per-VL FIFOs (for all contexts)
11047 * and of the SDMA engines to drop to 0.
11048 */
11049static void drain_data_vls(struct hfi1_devdata *dd)
11050{
11051 sc_wait(dd);
11052 sdma_wait(dd);
11053 pause_for_credit_return(dd);
11054}
11055
11056/*
11057 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11058 *
11059 * Use open_fill_data_vls() to resume using data VLs. This pair is
11060 * meant to be used like this:
11061 *
11062 * stop_drain_data_vls(dd);
11063 * // do things with per-VL resources
11064 * open_fill_data_vls(dd);
11065 */
11066int stop_drain_data_vls(struct hfi1_devdata *dd)
11067{
11068 int ret;
11069
11070 ret = disable_data_vls(dd);
11071 if (ret == 0)
11072 drain_data_vls(dd);
11073
11074 return ret;
11075}
11076
11077/*
11078 * Convert a nanosecond time to a cclock count. No matter how slow
11079 * the cclock, a non-zero ns will always have a non-zero result.
11080 */
11081u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11082{
11083 u32 cclocks;
11084
11085 if (dd->icode == ICODE_FPGA_EMULATION)
11086 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11087 else /* simulation pretends to be ASIC */
11088 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11089 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11090 cclocks = 1;
11091 return cclocks;
11092}
11093
11094/*
11095 * Convert a cclock count to nanoseconds. No matter how slow
11096 * the cclock, a non-zero cclocks will always have a non-zero result.
11097 */
11098u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11099{
11100 u32 ns;
11101
11102 if (dd->icode == ICODE_FPGA_EMULATION)
11103 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11104 else /* simulation pretends to be ASIC */
11105 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11106 if (cclocks && !ns)
11107 ns = 1;
11108 return ns;
11109}
11110
11111/*
11112 * Dynamically adjust the receive interrupt timeout for a context based on
11113 * incoming packet rate.
11114 *
11115 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11116 */
11117static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11118{
11119 struct hfi1_devdata *dd = rcd->dd;
11120 u32 timeout = rcd->rcvavail_timeout;
11121
11122 /*
11123	 * This algorithm doubles or halves the timeout depending on whether
11124	 * the number of packets received in this interrupt was less than, or
11125	 * greater than or equal to, the interrupt count.
11126	 *
11127	 * The calculations below do not allow a steady state to be achieved.
11128	 * Only at the endpoints is it possible to have an unchanging
11129	 * timeout.
11130 */
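	/*
	 * Illustrative numbers (assuming rcv_intr_count == 16): 10 packets
	 * in an interrupt halve the timeout (never below 1), 20 packets
	 * double it, capped at dd->rcv_intr_timeout_csr.
	 */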
11131 if (npkts < rcv_intr_count) {
11132 /*
11133 * Not enough packets arrived before the timeout, adjust
11134 * timeout downward.
11135 */
11136 if (timeout < 2) /* already at minimum? */
11137 return;
11138 timeout >>= 1;
11139 } else {
11140 /*
11141 * More than enough packets arrived before the timeout, adjust
11142 * timeout upward.
11143 */
11144 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11145 return;
11146 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11147 }
11148
11149 rcd->rcvavail_timeout = timeout;
11150	/*
11151	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11152	 * been verified to be in range
11153	 */
11154	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11155 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11156}
11157
11158void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11159 u32 intr_adjust, u32 npkts)
11160{
11161 struct hfi1_devdata *dd = rcd->dd;
11162 u64 reg;
11163 u32 ctxt = rcd->ctxt;
11164
11165 /*
11166 * Need to write timeout register before updating RcvHdrHead to ensure
11167 * that a new value is used when the HW decides to restart counting.
11168 */
11169 if (intr_adjust)
11170 adjust_rcv_timeout(rcd, npkts);
11171 if (updegr) {
11172 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11173 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11174 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11175 }
11176 mmiowb();
11177 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11178 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11179 << RCV_HDR_HEAD_HEAD_SHIFT);
11180 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11181 mmiowb();
11182}
11183
11184u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11185{
11186 u32 head, tail;
11187
11188 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11189 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11190
11191 if (rcd->rcvhdrtail_kvaddr)
11192 tail = get_rcvhdrtail(rcd);
11193 else
11194 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11195
11196 return head == tail;
11197}
11198
11199/*
11200 * Context Control and Receive Array encoding for buffer size:
11201 * 0x0 invalid
11202 * 0x1 4 KB
11203 * 0x2 8 KB
11204 * 0x3 16 KB
11205 * 0x4 32 KB
11206 * 0x5 64 KB
11207 * 0x6 128 KB
11208 * 0x7 256 KB
11209 * 0x8 512 KB (Receive Array only)
11210 * 0x9 1 MB (Receive Array only)
11211 * 0xa 2 MB (Receive Array only)
11212 *
11213 * 0xB-0xF - reserved (Receive Array only)
11214 *
11215 *
11216 * This routine assumes that the value has already been sanity checked.
11217 */
11218static u32 encoded_size(u32 size)
11219{
11220 switch (size) {
11221	case   4 * 1024: return 0x1;
11222 case 8 * 1024: return 0x2;
11223 case 16 * 1024: return 0x3;
11224 case 32 * 1024: return 0x4;
11225 case 64 * 1024: return 0x5;
11226 case 128 * 1024: return 0x6;
11227 case 256 * 1024: return 0x7;
11228 case 512 * 1024: return 0x8;
11229 case 1 * 1024 * 1024: return 0x9;
11230 case 2 * 1024 * 1024: return 0xa;
11231	}
11232 return 0x1; /* if invalid, go with the minimum size */
11233}
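/*
 * Example: encoded_size(64 * 1024) returns 0x5, while an unsupported size
 * such as 3 * 1024 falls back to 0x1 (the 4 KB encoding).
 */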
11234
11235void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11236{
11237 struct hfi1_ctxtdata *rcd;
11238 u64 rcvctrl, reg;
11239 int did_enable = 0;
11240
11241 rcd = dd->rcd[ctxt];
11242 if (!rcd)
11243 return;
11244
11245 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11246
11247 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11248 /* if the context already enabled, don't do the extra steps */
11249	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11250	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11251		/* reset the tail and hdr addresses, and sequence count */
11252 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11253 rcd->rcvhdrq_phys);
11254 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11255 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11256 rcd->rcvhdrqtailaddr_phys);
11257 rcd->seq_cnt = 1;
11258
11259 /* reset the cached receive header queue head value */
11260 rcd->head = 0;
11261
11262 /*
11263 * Zero the receive header queue so we don't get false
11264 * positives when checking the sequence number. The
11265 * sequence numbers could land exactly on the same spot.
11266		 * E.g. an rcd restart before the receive header queue wrapped.
11267 */
11268 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11269
11270 /* starting timeout */
11271 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11272
11273 /* enable the context */
11274 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11275
11276 /* clean the egr buffer size first */
11277 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11278 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11279 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11280 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11281
11282 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11283 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11284 did_enable = 1;
11285
11286 /* zero RcvEgrIndexHead */
11287 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11288
11289 /* set eager count and base index */
11290 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11291 & RCV_EGR_CTRL_EGR_CNT_MASK)
11292 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11293 (((rcd->eager_base >> RCV_SHIFT)
11294 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11295 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11296 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11297
11298 /*
11299 * Set TID (expected) count and base index.
11300 * rcd->expected_count is set to individual RcvArray entries,
11301 * not pairs, and the CSR takes a pair-count in groups of
11302 * four, so divide by 8.
11303 */
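		/*
		 * Example (assuming RCV_SHIFT == 3): an expected_count of
		 * 2048 RcvArray entries programs 2048 >> 3 = 256 into the
		 * TidPairCnt field.
		 */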
11304 reg = (((rcd->expected_count >> RCV_SHIFT)
11305 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11306 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11307 (((rcd->expected_base >> RCV_SHIFT)
11308 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11309 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11310 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11311		if (ctxt == HFI1_CTRL_CTXT)
11312			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11313	}
11314 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11315 write_csr(dd, RCV_VL15, 0);
11316		/*
11317 * When a receive context is being disabled, turn on tail
11318 * update with a dummy tail address and then disable the
11319 * receive context.
11320 */
11321 if (dd->rcvhdrtail_dummy_physaddr) {
11322 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11323 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011324 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011325 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11326 }
11327
Mike Marciniszyn77241052015-07-30 15:17:43 -040011328 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11329 }
11330 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11331 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11332 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11333 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11334 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11335 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011336 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11337 /* See comment on RcvCtxtCtrl.TailUpd above */
11338 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11339 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11340 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011341 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11342 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11343 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11344 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11345 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011346 /*
11347 * In one-packet-per-eager mode, the size comes from
11348 * the RcvArray entry.
11349 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011350 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11351 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11352 }
11353 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11354 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11355 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11356 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11357 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11358 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11359 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11360 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11361 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11362 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11363 rcd->rcvctrl = rcvctrl;
11364 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11365 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11366
11367 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011368 if (did_enable &&
11369 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011370 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11371 if (reg != 0) {
11372 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11373 ctxt, reg);
11374 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11375 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11376 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11377 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11378 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11379 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11380 ctxt, reg, reg == 0 ? "not" : "still");
11381 }
11382 }
11383
11384 if (did_enable) {
11385 /*
11386 * The interrupt timeout and count must be set after
11387 * the context is enabled to take effect.
11388 */
11389 /* set interrupt timeout */
11390 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11391 (u64)rcd->rcvavail_timeout <<
11392 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11393
11394 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11395 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11396 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11397 }
11398
11399 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11400 /*
11401 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011402 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11403 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011404 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011405 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11406 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011407}
11408
11409u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11410 u64 **cntrp)
11411{
11412 int ret;
11413 u64 val = 0;
11414
11415 if (namep) {
11416 ret = dd->cntrnameslen;
11417 if (pos != 0) {
11418 dd_dev_err(dd, "read_cntrs does not support indexing");
11419 return 0;
11420 }
11421 *namep = dd->cntrnames;
11422 } else {
11423 const struct cntr_entry *entry;
11424 int i, j;
11425
11426 ret = (dd->ndevcntrs) * sizeof(u64);
11427 if (pos != 0) {
11428 dd_dev_err(dd, "read_cntrs does not support indexing");
11429 return 0;
11430 }
11431
11432 /* Get the start of the block of counters */
11433 *cntrp = dd->cntrs;
11434
11435 /*
11436 * Now go and fill in each counter in the block.
11437 */
11438 for (i = 0; i < DEV_CNTR_LAST; i++) {
11439 entry = &dev_cntrs[i];
11440 hfi1_cdbg(CNTR, "reading %s", entry->name);
11441 if (entry->flags & CNTR_DISABLED) {
11442 /* Nothing */
11443 hfi1_cdbg(CNTR, "\tDisabled\n");
11444 } else {
11445 if (entry->flags & CNTR_VL) {
11446 hfi1_cdbg(CNTR, "\tPer VL\n");
11447 for (j = 0; j < C_VL_COUNT; j++) {
11448 val = entry->rw_cntr(entry,
11449 dd, j,
11450 CNTR_MODE_R,
11451 0);
11452 hfi1_cdbg(
11453 CNTR,
11454 "\t\tRead 0x%llx for %d\n",
11455 val, j);
11456 dd->cntrs[entry->offset + j] =
11457 val;
11458 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011459 } else if (entry->flags & CNTR_SDMA) {
11460 hfi1_cdbg(CNTR,
11461 "\t Per SDMA Engine\n");
11462 for (j = 0; j < dd->chip_sdma_engines;
11463 j++) {
11464 val =
11465 entry->rw_cntr(entry, dd, j,
11466 CNTR_MODE_R, 0);
11467 hfi1_cdbg(CNTR,
11468 "\t\tRead 0x%llx for %d\n",
11469 val, j);
11470 dd->cntrs[entry->offset + j] =
11471 val;
11472 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011473 } else {
11474 val = entry->rw_cntr(entry, dd,
11475 CNTR_INVALID_VL,
11476 CNTR_MODE_R, 0);
11477 dd->cntrs[entry->offset] = val;
11478 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11479 }
11480 }
11481 }
11482 }
11483 return ret;
11484}
11485
11486/*
11487 * Used by sysfs to create files for hfi stats to read
11488 */
11489u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11490 char **namep, u64 **cntrp)
11491{
11492 int ret;
11493 u64 val = 0;
11494
11495 if (namep) {
11496 ret = dd->portcntrnameslen;
11497 if (pos != 0) {
11498 dd_dev_err(dd, "index not supported");
11499 return 0;
11500 }
11501 *namep = dd->portcntrnames;
11502 } else {
11503 const struct cntr_entry *entry;
11504 struct hfi1_pportdata *ppd;
11505 int i, j;
11506
11507 ret = (dd->nportcntrs) * sizeof(u64);
11508 if (pos != 0) {
11509 dd_dev_err(dd, "indexing not supported");
11510 return 0;
11511 }
11512 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11513 *cntrp = ppd->cntrs;
11514
11515 for (i = 0; i < PORT_CNTR_LAST; i++) {
11516 entry = &port_cntrs[i];
11517 hfi1_cdbg(CNTR, "reading %s", entry->name);
11518 if (entry->flags & CNTR_DISABLED) {
11519 /* Nothing */
11520 hfi1_cdbg(CNTR, "\tDisabled\n");
11521 continue;
11522 }
11523
11524 if (entry->flags & CNTR_VL) {
11525 hfi1_cdbg(CNTR, "\tPer VL");
11526 for (j = 0; j < C_VL_COUNT; j++) {
11527 val = entry->rw_cntr(entry, ppd, j,
11528 CNTR_MODE_R,
11529 0);
11530 hfi1_cdbg(
11531 CNTR,
11532 "\t\tRead 0x%llx for %d",
11533 val, j);
11534 ppd->cntrs[entry->offset + j] = val;
11535 }
11536 } else {
11537 val = entry->rw_cntr(entry, ppd,
11538 CNTR_INVALID_VL,
11539 CNTR_MODE_R,
11540 0);
11541 ppd->cntrs[entry->offset] = val;
11542 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11543 }
11544 }
11545 }
11546 return ret;
11547}
11548
11549static void free_cntrs(struct hfi1_devdata *dd)
11550{
11551 struct hfi1_pportdata *ppd;
11552 int i;
11553
11554 if (dd->synth_stats_timer.data)
11555 del_timer_sync(&dd->synth_stats_timer);
11556 dd->synth_stats_timer.data = 0;
11557 ppd = (struct hfi1_pportdata *)(dd + 1);
11558 for (i = 0; i < dd->num_pports; i++, ppd++) {
11559 kfree(ppd->cntrs);
11560 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011561 free_percpu(ppd->ibport_data.rvp.rc_acks);
11562 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11563 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011564 ppd->cntrs = NULL;
11565 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011566 ppd->ibport_data.rvp.rc_acks = NULL;
11567 ppd->ibport_data.rvp.rc_qacks = NULL;
11568 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011569 }
11570 kfree(dd->portcntrnames);
11571 dd->portcntrnames = NULL;
11572 kfree(dd->cntrs);
11573 dd->cntrs = NULL;
11574 kfree(dd->scntrs);
11575 dd->scntrs = NULL;
11576 kfree(dd->cntrnames);
11577 dd->cntrnames = NULL;
11578}
11579
11580#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11581#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11582
11583static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11584 u64 *psval, void *context, int vl)
11585{
11586 u64 val;
11587 u64 sval = *psval;
11588
11589 if (entry->flags & CNTR_DISABLED) {
11590 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11591 return 0;
11592 }
11593
11594 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11595
11596 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11597
11598 /* If it's a synthetic counter, there is more work we need to do */
11599 if (entry->flags & CNTR_SYNTH) {
11600 if (sval == CNTR_MAX) {
11601 /* No need to read already saturated */
11602 return CNTR_MAX;
11603 }
11604
11605 if (entry->flags & CNTR_32BIT) {
11606 /* 32bit counters can wrap multiple times */
11607 u64 upper = sval >> 32;
11608 u64 lower = (sval << 32) >> 32;
11609
11610 if (lower > val) { /* hw wrapped */
11611 if (upper == CNTR_32BIT_MAX)
11612 val = CNTR_MAX;
11613 else
11614 upper++;
11615 }
11616
11617 if (val != CNTR_MAX)
11618 val = (upper << 32) | val;
11619
11620 } else {
11621 /* If we rolled we are saturated */
11622 if ((val < sval) || (val > CNTR_MAX))
11623 val = CNTR_MAX;
11624 }
11625 }
11626
11627 *psval = val;
11628
11629 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11630
11631 return val;
11632}
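
/*
 * Worked example of the 32-bit wrap handling above (illustrative numbers):
 * with a saved value sval = 0x1FFFFFFF0, upper = 1 and lower = 0xFFFFFFF0.
 * If the hardware now reads back val = 0x10, lower > val indicates a wrap,
 * upper is bumped to 2, and the returned/saved value becomes 0x200000010.
 */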
11633
11634static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11635 struct cntr_entry *entry,
11636 u64 *psval, void *context, int vl, u64 data)
11637{
11638 u64 val;
11639
11640 if (entry->flags & CNTR_DISABLED) {
11641 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11642 return 0;
11643 }
11644
11645 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11646
11647 if (entry->flags & CNTR_SYNTH) {
11648 *psval = data;
11649 if (entry->flags & CNTR_32BIT) {
11650 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11651 (data << 32) >> 32);
11652 val = data; /* return the full 64bit value */
11653 } else {
11654 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11655 data);
11656 }
11657 } else {
11658 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11659 }
11660
11661 *psval = val;
11662
11663 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11664
11665 return val;
11666}
11667
11668u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11669{
11670 struct cntr_entry *entry;
11671 u64 *sval;
11672
11673 entry = &dev_cntrs[index];
11674 sval = dd->scntrs + entry->offset;
11675
11676 if (vl != CNTR_INVALID_VL)
11677 sval += vl;
11678
11679 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11680}
11681
11682u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11683{
11684 struct cntr_entry *entry;
11685 u64 *sval;
11686
11687 entry = &dev_cntrs[index];
11688 sval = dd->scntrs + entry->offset;
11689
11690 if (vl != CNTR_INVALID_VL)
11691 sval += vl;
11692
11693 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11694}
11695
11696u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11697{
11698 struct cntr_entry *entry;
11699 u64 *sval;
11700
11701 entry = &port_cntrs[index];
11702 sval = ppd->scntrs + entry->offset;
11703
11704 if (vl != CNTR_INVALID_VL)
11705 sval += vl;
11706
11707 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11708 (index <= C_RCV_HDR_OVF_LAST)) {
11709 /* We do not want to bother for disabled contexts */
11710 return 0;
11711 }
11712
11713 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11714}
11715
11716u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11717{
11718 struct cntr_entry *entry;
11719 u64 *sval;
11720
11721 entry = &port_cntrs[index];
11722 sval = ppd->scntrs + entry->offset;
11723
11724 if (vl != CNTR_INVALID_VL)
11725 sval += vl;
11726
11727 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11728 (index <= C_RCV_HDR_OVF_LAST)) {
11729 /* We do not want to bother for disabled contexts */
11730 return 0;
11731 }
11732
11733 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11734}
11735
11736static void update_synth_timer(unsigned long opaque)
11737{
11738 u64 cur_tx;
11739 u64 cur_rx;
11740 u64 total_flits;
11741 u8 update = 0;
11742 int i, j, vl;
11743 struct hfi1_pportdata *ppd;
11744 struct cntr_entry *entry;
11745
11746 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11747
11748 /*
11749 * Rather than keep beating on the CSRs, pick a minimal set that we can
11750 * check to watch for potential rollover. We can do this by looking at
11751 * the number of flits sent/received. If the total exceeds 32 bits, then
11752 * we have to iterate over all the counters and update them.
11753 */
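	/*
	 * In the common case only the two flit-count reads below happen per
	 * timer tick; the full counter walk further down runs only when
	 * roughly 2^32 flits have moved since the last update or one of the
	 * tripwire counters appears to have gone backwards.
	 */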
11754 entry = &dev_cntrs[C_DC_RCV_FLITS];
11755 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11756
11757 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11758 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11759
11760 hfi1_cdbg(
11761 CNTR,
11762 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11763 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11764
11765 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11766 /*
11767 * May not be strictly necessary to update but it won't hurt and
11768 * simplifies the logic here.
11769 */
11770 update = 1;
11771 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11772 dd->unit);
11773 } else {
11774 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11775 hfi1_cdbg(CNTR,
11776 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11777 total_flits, (u64)CNTR_32BIT_MAX);
11778 if (total_flits >= CNTR_32BIT_MAX) {
11779 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11780 dd->unit);
11781 update = 1;
11782 }
11783 }
11784
11785 if (update) {
11786 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11787 for (i = 0; i < DEV_CNTR_LAST; i++) {
11788 entry = &dev_cntrs[i];
11789 if (entry->flags & CNTR_VL) {
11790 for (vl = 0; vl < C_VL_COUNT; vl++)
11791 read_dev_cntr(dd, i, vl);
11792 } else {
11793 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11794 }
11795 }
11796 ppd = (struct hfi1_pportdata *)(dd + 1);
11797 for (i = 0; i < dd->num_pports; i++, ppd++) {
11798 for (j = 0; j < PORT_CNTR_LAST; j++) {
11799 entry = &port_cntrs[j];
11800 if (entry->flags & CNTR_VL) {
11801 for (vl = 0; vl < C_VL_COUNT; vl++)
11802 read_port_cntr(ppd, j, vl);
11803 } else {
11804 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11805 }
11806 }
11807 }
11808
11809 /*
11810 * We want the value in the register. The goal is to keep track
11811 * of the number of "ticks", not the counter value. In other
11812 * words, if the register rolls, we want to notice it and go ahead
11813 * and force an update.
11814 */
11815 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11816 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11817 CNTR_MODE_R, 0);
11818
11819 entry = &dev_cntrs[C_DC_RCV_FLITS];
11820 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11821 CNTR_MODE_R, 0);
11822
11823 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11824 dd->unit, dd->last_tx, dd->last_rx);
11825
11826 } else {
11827 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11828 }
11829
11830 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11831}
11832
11833 #define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11834static int init_cntrs(struct hfi1_devdata *dd)
11835{
Dean Luickc024c552016-01-11 18:30:57 -050011836 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011837 size_t sz;
11838 char *p;
11839 char name[C_MAX_NAME];
11840 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011841 const char *bit_type_32 = ",32";
11842 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011843
11844 /* set up the stats timer; it is started (mod_timer) at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011845 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11846 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011847
11848 /***********************/
11849 /* per device counters */
11850 /***********************/
11851
11852 /* size names and determine how many we have */
11853 dd->ndevcntrs = 0;
11854 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011855
11856 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011857 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11858 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11859 continue;
11860 }
11861
11862 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011863 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011864 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011865 snprintf(name, C_MAX_NAME, "%s%d",
11866 dev_cntrs[i].name,
11867 vl_from_idx(j));
11868 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011869 /* Add ",32" for 32-bit counters */
11870 if (dev_cntrs[i].flags & CNTR_32BIT)
11871 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011872 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011873 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011874 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011875 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011876 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011877 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011878 snprintf(name, C_MAX_NAME, "%s%d",
11879 dev_cntrs[i].name, j);
11880 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011881 /* Add ",32" for 32-bit counters */
11882 if (dev_cntrs[i].flags & CNTR_32BIT)
11883 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011884 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011885 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011886 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011887 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011888 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011889 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011890 /* Add ",32" for 32-bit counters */
11891 if (dev_cntrs[i].flags & CNTR_32BIT)
11892 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011893 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011894 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011895 }
11896 }
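
	/*
	 * At this point sz holds the total bytes needed for the flat name
	 * string: for every expanded counter slot, its printed name, plus
	 * bit_type_32_sz extra for ",32" on 32-bit counters, plus one byte
	 * for the trailing '\n'. E.g. a hypothetical 32-bit counter printed
	 * as "FooCnt3" contributes 7 + 3 + 1 = 11 bytes.
	 */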
11897
11898 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011899 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011900 if (!dd->cntrs)
11901 goto bail;
11902
Dean Luickc024c552016-01-11 18:30:57 -050011903 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011904 if (!dd->scntrs)
11905 goto bail;
11906
Mike Marciniszyn77241052015-07-30 15:17:43 -040011907 /* allocate space for the counter names */
11908 dd->cntrnameslen = sz;
11909 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11910 if (!dd->cntrnames)
11911 goto bail;
11912
11913 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011914 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011915 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11916 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011917 } else if (dev_cntrs[i].flags & CNTR_VL) {
11918 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011919 snprintf(name, C_MAX_NAME, "%s%d",
11920 dev_cntrs[i].name,
11921 vl_from_idx(j));
11922 memcpy(p, name, strlen(name));
11923 p += strlen(name);
11924
11925 /* Counter is 32 bits */
11926 if (dev_cntrs[i].flags & CNTR_32BIT) {
11927 memcpy(p, bit_type_32, bit_type_32_sz);
11928 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011929 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011930
Mike Marciniszyn77241052015-07-30 15:17:43 -040011931 *p++ = '\n';
11932 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011933 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11934 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011935 snprintf(name, C_MAX_NAME, "%s%d",
11936 dev_cntrs[i].name, j);
11937 memcpy(p, name, strlen(name));
11938 p += strlen(name);
11939
11940 /* Counter is 32 bits */
11941 if (dev_cntrs[i].flags & CNTR_32BIT) {
11942 memcpy(p, bit_type_32, bit_type_32_sz);
11943 p += bit_type_32_sz;
11944 }
11945
11946 *p++ = '\n';
11947 }
11948 } else {
11949 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11950 p += strlen(dev_cntrs[i].name);
11951
11952 /* Counter is 32 bits */
11953 if (dev_cntrs[i].flags & CNTR_32BIT) {
11954 memcpy(p, bit_type_32, bit_type_32_sz);
11955 p += bit_type_32_sz;
11956 }
11957
11958 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011959 }
11960 }
11961
11962 /*********************/
11963 /* per port counters */
11964 /*********************/
11965
11966 /*
11967 * Go through the counters for the overflows and disable the ones we
11968 * don't need. This varies based on platform so we need to do it
11969 * dynamically here.
11970 */
11971 rcv_ctxts = dd->num_rcv_contexts;
11972 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11973 i <= C_RCV_HDR_OVF_LAST; i++) {
11974 port_cntrs[i].flags |= CNTR_DISABLED;
11975 }
11976
11977 /* size port counter names and determine how many we have */
11978 sz = 0;
11979 dd->nportcntrs = 0;
11980 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011981 if (port_cntrs[i].flags & CNTR_DISABLED) {
11982 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11983 continue;
11984 }
11985
11986 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011987 port_cntrs[i].offset = dd->nportcntrs;
11988 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011989 snprintf(name, C_MAX_NAME, "%s%d",
11990 port_cntrs[i].name,
11991 vl_from_idx(j));
11992 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011993 /* Add ",32" for 32-bit counters */
11994 if (port_cntrs[i].flags & CNTR_32BIT)
11995 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011996 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011997 dd->nportcntrs++;
11998 }
11999 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012000 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012001 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012002 /* Add ",32" for 32-bit counters */
12003 if (port_cntrs[i].flags & CNTR_32BIT)
12004 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012005 port_cntrs[i].offset = dd->nportcntrs;
12006 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012007 }
12008 }
12009
12010 /* allocate space for the counter names */
12011 dd->portcntrnameslen = sz;
12012 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12013 if (!dd->portcntrnames)
12014 goto bail;
12015
12016 /* fill in port cntr names */
12017 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12018 if (port_cntrs[i].flags & CNTR_DISABLED)
12019 continue;
12020
12021 if (port_cntrs[i].flags & CNTR_VL) {
12022 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012023 snprintf(name, C_MAX_NAME, "%s%d",
12024 port_cntrs[i].name,
12025 vl_from_idx(j));
12026 memcpy(p, name, strlen(name));
12027 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012028
12029 /* Counter is 32 bits */
12030 if (port_cntrs[i].flags & CNTR_32BIT) {
12031 memcpy(p, bit_type_32, bit_type_32_sz);
12032 p += bit_type_32_sz;
12033 }
12034
Mike Marciniszyn77241052015-07-30 15:17:43 -040012035 *p++ = '\n';
12036 }
12037 } else {
12038 memcpy(p, port_cntrs[i].name,
12039 strlen(port_cntrs[i].name));
12040 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012041
12042 /* Counter is 32 bits */
12043 if (port_cntrs[i].flags & CNTR_32BIT) {
12044 memcpy(p, bit_type_32, bit_type_32_sz);
12045 p += bit_type_32_sz;
12046 }
12047
Mike Marciniszyn77241052015-07-30 15:17:43 -040012048 *p++ = '\n';
12049 }
12050 }
12051
12052 /* allocate per port storage for counter values */
12053 ppd = (struct hfi1_pportdata *)(dd + 1);
12054 for (i = 0; i < dd->num_pports; i++, ppd++) {
12055 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12056 if (!ppd->cntrs)
12057 goto bail;
12058
12059 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12060 if (!ppd->scntrs)
12061 goto bail;
12062 }
12063
12064 /* CPU counters need to be allocated and zeroed */
12065 if (init_cpu_counters(dd))
12066 goto bail;
12067
12068 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12069 return 0;
12070bail:
12071 free_cntrs(dd);
12072 return -ENOMEM;
12073}
12074
Mike Marciniszyn77241052015-07-30 15:17:43 -040012075static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12076{
12077 switch (chip_lstate) {
12078 default:
12079 dd_dev_err(dd,
12080 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12081 chip_lstate);
12082 /* fall through */
12083 case LSTATE_DOWN:
12084 return IB_PORT_DOWN;
12085 case LSTATE_INIT:
12086 return IB_PORT_INIT;
12087 case LSTATE_ARMED:
12088 return IB_PORT_ARMED;
12089 case LSTATE_ACTIVE:
12090 return IB_PORT_ACTIVE;
12091 }
12092}
12093
12094u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12095{
12096 /* look at the HFI meta-states only */
12097 switch (chip_pstate & 0xf0) {
12098 default:
12099 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12100 chip_pstate);
12101 /* fall through */
12102 case PLS_DISABLED:
12103 return IB_PORTPHYSSTATE_DISABLED;
12104 case PLS_OFFLINE:
12105 return OPA_PORTPHYSSTATE_OFFLINE;
12106 case PLS_POLLING:
12107 return IB_PORTPHYSSTATE_POLLING;
12108 case PLS_CONFIGPHY:
12109 return IB_PORTPHYSSTATE_TRAINING;
12110 case PLS_LINKUP:
12111 return IB_PORTPHYSSTATE_LINKUP;
12112 case PLS_PHYTEST:
12113 return IB_PORTPHYSSTATE_PHY_TEST;
12114 }
12115}
12116
12117/* return the OPA port logical state name */
12118const char *opa_lstate_name(u32 lstate)
12119{
12120 static const char * const port_logical_names[] = {
12121 "PORT_NOP",
12122 "PORT_DOWN",
12123 "PORT_INIT",
12124 "PORT_ARMED",
12125 "PORT_ACTIVE",
12126 "PORT_ACTIVE_DEFER",
12127 };
12128 if (lstate < ARRAY_SIZE(port_logical_names))
12129 return port_logical_names[lstate];
12130 return "unknown";
12131}
12132
12133/* return the OPA port physical state name */
12134const char *opa_pstate_name(u32 pstate)
12135{
12136 static const char * const port_physical_names[] = {
12137 "PHYS_NOP",
12138 "reserved1",
12139 "PHYS_POLL",
12140 "PHYS_DISABLED",
12141 "PHYS_TRAINING",
12142 "PHYS_LINKUP",
12143 "PHYS_LINK_ERR_RECOVER",
12144 "PHYS_PHY_TEST",
12145 "reserved8",
12146 "PHYS_OFFLINE",
12147 "PHYS_GANGED",
12148 "PHYS_TEST",
12149 };
12150 if (pstate < ARRAY_SIZE(port_physical_names))
12151 return port_physical_names[pstate];
12152 return "unknown";
12153}
12154
12155/*
12156 * Read the hardware link state and set the driver's cached value of it.
12157 * Return the (new) current value.
12158 */
12159u32 get_logical_state(struct hfi1_pportdata *ppd)
12160{
12161 u32 new_state;
12162
12163 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12164 if (new_state != ppd->lstate) {
12165 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12166 opa_lstate_name(new_state), new_state);
12167 ppd->lstate = new_state;
12168 }
12169 /*
12170 * Set port status flags in the page mapped into userspace
12171 * memory. Do it here to ensure a reliable state - this is
12172 * the only function called by all state handling code.
12173 * Always set the flags due to the fact that the cache value
12174 * might have been changed explicitly outside of this
12175 * function.
12176 */
12177 if (ppd->statusp) {
12178 switch (ppd->lstate) {
12179 case IB_PORT_DOWN:
12180 case IB_PORT_INIT:
12181 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12182 HFI1_STATUS_IB_READY);
12183 break;
12184 case IB_PORT_ARMED:
12185 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12186 break;
12187 case IB_PORT_ACTIVE:
12188 *ppd->statusp |= HFI1_STATUS_IB_READY;
12189 break;
12190 }
12191 }
12192 return ppd->lstate;
12193}
12194
12195/**
12196 * wait_logical_linkstate - wait for an IB link state change to occur
12197 * @ppd: port device
12198 * @state: the state to wait for
12199 * @msecs: the number of milliseconds to wait
12200 *
12201 * Wait up to msecs milliseconds for IB link state change to occur.
12202 * For now, take the easy polling route.
12203 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12204 */
12205static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12206 int msecs)
12207{
12208 unsigned long timeout;
12209
12210 timeout = jiffies + msecs_to_jiffies(msecs);
12211 while (1) {
12212 if (get_logical_state(ppd) == state)
12213 return 0;
12214 if (time_after(jiffies, timeout))
12215 break;
12216 msleep(20);
12217 }
12218 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12219
12220 return -ETIMEDOUT;
12221}
12222
12223u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12224{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012225 u32 pstate;
12226 u32 ib_pstate;
12227
12228 pstate = read_physical_state(ppd->dd);
12229 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012230 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012231 dd_dev_info(ppd->dd,
12232 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12233 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12234 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012235 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012236 }
12237 return ib_pstate;
12238}
12239
12240/*
12241 * Read/modify/write ASIC_QSFP register bits as selected by mask
12242 * data: 0 or 1 in the positions depending on what needs to be written
12243 * dir: 0 for read, 1 for write
12244 * mask: select by setting
12245 * I2CCLK (bit 0)
12246 * I2CDATA (bit 1)
12247 */
12248u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12249 u32 mask)
12250{
12251 u64 qsfp_oe, target_oe;
12252
12253 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12254 if (mask) {
12255 /* We are writing register bits, so lock access */
12256 dir &= mask;
12257 data &= mask;
12258
12259 qsfp_oe = read_csr(dd, target_oe);
12260 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12261 write_csr(dd, target_oe, qsfp_oe);
12262 }
12263 /* We are exclusively reading bits here, but it is unlikely
12264 * we'll get valid data when we set the direction of the pin
12265 * in the same call, so the caller should invoke this function again
12266 * to get valid data.
12267 */
12268 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12269}
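
/*
 * Illustrative calls (raw bit values; any symbolic bit names are up to the
 * caller):
 *   hfi1_gpio_mod(dd, 0, 0, 0, 0)     - mask 0: just read ASIC_QSFP1_IN.
 *   hfi1_gpio_mod(dd, 0, 0, 0x1, 0x1) - set QSFP1 I2CCLK (bit 0) as an
 *                                       output before reading back the pins.
 */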
12270
12271#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12272(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12273
12274#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12275(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12276
12277int hfi1_init_ctxt(struct send_context *sc)
12278{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012279 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012280 struct hfi1_devdata *dd = sc->dd;
12281 u64 reg;
12282 u8 set = (sc->type == SC_USER ?
12283 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12284 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12285 reg = read_kctxt_csr(dd, sc->hw_context,
12286 SEND_CTXT_CHECK_ENABLE);
12287 if (set)
12288 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12289 else
12290 SET_STATIC_RATE_CONTROL_SMASK(reg);
12291 write_kctxt_csr(dd, sc->hw_context,
12292 SEND_CTXT_CHECK_ENABLE, reg);
12293 }
12294 return 0;
12295}
12296
12297int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12298{
12299 int ret = 0;
12300 u64 reg;
12301
12302 if (dd->icode != ICODE_RTL_SILICON) {
12303 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12304 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12305 __func__);
12306 return -EINVAL;
12307 }
12308 reg = read_csr(dd, ASIC_STS_THERM);
12309 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12310 ASIC_STS_THERM_CURR_TEMP_MASK);
12311 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12312 ASIC_STS_THERM_LO_TEMP_MASK);
12313 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12314 ASIC_STS_THERM_HI_TEMP_MASK);
12315 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12316 ASIC_STS_THERM_CRIT_TEMP_MASK);
12317 /* triggers is a 3-bit value - 1 bit per trigger. */
12318 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12319
12320 return ret;
12321}
12322
12323/* ========================================================================= */
12324
12325/*
12326 * Enable/disable chip from delivering interrupts.
12327 */
12328void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12329{
12330 int i;
12331
12332 /*
12333 * In HFI, the mask needs to be 1 to allow interrupts.
12334 */
12335 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012336 /* enable all interrupts */
12337 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012338 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012339
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012340 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012341 } else {
12342 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012343 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012344 }
12345}
12346
12347/*
12348 * Clear all interrupt sources on the chip.
12349 */
12350static void clear_all_interrupts(struct hfi1_devdata *dd)
12351{
12352 int i;
12353
12354 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012355 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012356
12357 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12358 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12359 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12360 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12361 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12362 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12363 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12364 for (i = 0; i < dd->chip_send_contexts; i++)
12365 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12366 for (i = 0; i < dd->chip_sdma_engines; i++)
12367 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12368
12369 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12370 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12371 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12372}
12373
12374/* Move to pcie.c? */
12375static void disable_intx(struct pci_dev *pdev)
12376{
12377 pci_intx(pdev, 0);
12378}
12379
12380static void clean_up_interrupts(struct hfi1_devdata *dd)
12381{
12382 int i;
12383
12384 /* remove irqs - must happen before disabling/turning off */
12385 if (dd->num_msix_entries) {
12386 /* MSI-X */
12387 struct hfi1_msix_entry *me = dd->msix_entries;
12388
12389 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012390 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012391 continue;
12392 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012393 free_irq(me->msix.vector, me->arg);
12394 }
12395 } else {
12396 /* INTx */
12397 if (dd->requested_intx_irq) {
12398 free_irq(dd->pcidev->irq, dd);
12399 dd->requested_intx_irq = 0;
12400 }
12401 }
12402
12403 /* turn off interrupts */
12404 if (dd->num_msix_entries) {
12405 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012406 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012407 } else {
12408 /* INTx */
12409 disable_intx(dd->pcidev);
12410 }
12411
12412 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012413 kfree(dd->msix_entries);
12414 dd->msix_entries = NULL;
12415 dd->num_msix_entries = 0;
12416}
12417
12418/*
12419 * Remap the interrupt source from the general handler to the given MSI-X
12420 * interrupt.
12421 */
12422static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12423{
12424 u64 reg;
12425 int m, n;
12426
12427 /* clear from the handled mask of the general interrupt */
12428 m = isrc / 64;
12429 n = isrc % 64;
12430 dd->gi_mask[m] &= ~((u64)1 << n);
12431
12432 /* direct the chip source to the given MSI-X interrupt */
12433 m = isrc / 8;
12434 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012435 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12436 reg &= ~((u64)0xff << (8 * n));
12437 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12438 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012439}
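
/*
 * Example of the mapping math above (illustrative source number): for
 * isrc = 137, bit 9 of gi_mask[2] is cleared (137 / 64 = 2, 137 % 64 = 9),
 * and byte 1 of CCE_INT_MAP CSR 17 (137 / 8 = 17, 137 % 8 = 1) is set to
 * the chosen MSI-X vector number.
 */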
12440
12441static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12442 int engine, int msix_intr)
12443{
12444 /*
12445 * SDMA engine interrupt sources grouped by type, rather than
12446 * engine. Per-engine interrupts are as follows:
12447 * SDMA
12448 * SDMAProgress
12449 * SDMAIdle
12450 */
Jubin John8638b772016-02-14 20:19:24 -080012451 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012452 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012453 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012454 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012455 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012456 msix_intr);
12457}
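
/*
 * For example (assuming TXE_NUM_SDMA_ENGINES == 16), engine 3 has its
 * SDMA, SDMAProgress and SDMAIdle sources at IS_SDMA_START + 3, + 19 and
 * + 35, and all three are pointed at the same MSI-X vector.
 */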
12458
Mike Marciniszyn77241052015-07-30 15:17:43 -040012459static int request_intx_irq(struct hfi1_devdata *dd)
12460{
12461 int ret;
12462
Jubin John98050712015-11-16 21:59:27 -050012463 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12464 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012465 ret = request_irq(dd->pcidev->irq, general_interrupt,
12466 IRQF_SHARED, dd->intx_name, dd);
12467 if (ret)
12468 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12469 ret);
12470 else
12471 dd->requested_intx_irq = 1;
12472 return ret;
12473}
12474
12475static int request_msix_irqs(struct hfi1_devdata *dd)
12476{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012477 int first_general, last_general;
12478 int first_sdma, last_sdma;
12479 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012480 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012481
12482 /* calculate the ranges we are going to use */
12483 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012484 last_general = first_general + 1;
12485 first_sdma = last_general;
12486 last_sdma = first_sdma + dd->num_sdma;
12487 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012488 last_rx = first_rx + dd->n_krcv_queues;
12489
12490 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012491 * Sanity check - the code expects all SDMA chip source
12492 * interrupts to be in the same CSR, starting at bit 0. Verify
12493 * that this is true by checking the bit location of the start.
12494 */
12495 BUILD_BUG_ON(IS_SDMA_START % 64);
12496
12497 for (i = 0; i < dd->num_msix_entries; i++) {
12498 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12499 const char *err_info;
12500 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012501 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012502 void *arg;
12503 int idx;
12504 struct hfi1_ctxtdata *rcd = NULL;
12505 struct sdma_engine *sde = NULL;
12506
12507 /* obtain the arguments to request_irq */
12508 if (first_general <= i && i < last_general) {
12509 idx = i - first_general;
12510 handler = general_interrupt;
12511 arg = dd;
12512 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012513 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012514 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012515 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012516 } else if (first_sdma <= i && i < last_sdma) {
12517 idx = i - first_sdma;
12518 sde = &dd->per_sdma[idx];
12519 handler = sdma_interrupt;
12520 arg = sde;
12521 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012522 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012523 err_info = "sdma";
12524 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012525 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012526 } else if (first_rx <= i && i < last_rx) {
12527 idx = i - first_rx;
12528 rcd = dd->rcd[idx];
12529 /* no interrupt if no rcd */
12530 if (!rcd)
12531 continue;
12532 /*
12533 * Set the interrupt register and mask for this
12534 * context's interrupt.
12535 */
Jubin John8638b772016-02-14 20:19:24 -080012536 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012537 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012538 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012539 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012540 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012541 arg = rcd;
12542 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012543 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012544 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012545 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012546 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012547 } else {
12548 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012549 * ignore it
12550 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012551 dd_dev_err(dd,
12552 "Unexpected extra MSI-X interrupt %d\n", i);
12553 continue;
12554 }
12555 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012556 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012557 continue;
12558 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012559 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012560
Dean Luickf4f30031c2015-10-26 10:28:44 -040012561 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12562 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012563 if (ret) {
12564 dd_dev_err(dd,
12565 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12566 err_info, me->msix.vector, idx, ret);
12567 return ret;
12568 }
12569 /*
12570 * assign arg after request_irq call, so it will be
12571 * cleaned up
12572 */
12573 me->arg = arg;
12574
Mitko Haralanov957558c2016-02-03 14:33:40 -080012575 ret = hfi1_get_irq_affinity(dd, me);
12576 if (ret)
12577 dd_dev_err(dd,
12578 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012579 }
12580
Mike Marciniszyn77241052015-07-30 15:17:43 -040012581 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012582}
12583
12584/*
12585 * Set the general handler to accept all interrupts, remap all
12586 * chip interrupts back to MSI-X 0.
12587 */
12588static void reset_interrupts(struct hfi1_devdata *dd)
12589{
12590 int i;
12591
12592 /* all interrupts handled by the general handler */
12593 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12594 dd->gi_mask[i] = ~(u64)0;
12595
12596 /* all chip interrupts map to MSI-X 0 */
12597 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012598 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012599}
12600
12601static int set_up_interrupts(struct hfi1_devdata *dd)
12602{
12603 struct hfi1_msix_entry *entries;
12604 u32 total, request;
12605 int i, ret;
12606 int single_interrupt = 0; /* we expect to have all the interrupts */
12607
12608 /*
12609 * Interrupt count:
12610 * 1 general, "slow path" interrupt (includes the SDMA engines
12611 * slow source, SDMACleanupDone)
12612 * N interrupts - one per used SDMA engine
12613 * M interrupt - one per kernel receive context
12614 */
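	/*
	 * E.g. (illustrative counts): 16 SDMA engines in use and 8 kernel
	 * receive contexts give total = 1 + 16 + 8 = 25 MSI-X vectors
	 * requested below.
	 */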
12615 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12616
12617 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12618 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012619 ret = -ENOMEM;
12620 goto fail;
12621 }
12622 /* 1-1 MSI-X entry assignment */
12623 for (i = 0; i < total; i++)
12624 entries[i].msix.entry = i;
12625
12626 /* ask for MSI-X interrupts */
12627 request = total;
12628 request_msix(dd, &request, entries);
12629
12630 if (request == 0) {
12631 /* using INTx */
12632 /* dd->num_msix_entries already zero */
12633 kfree(entries);
12634 single_interrupt = 1;
12635 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12636 } else {
12637 /* using MSI-X */
12638 dd->num_msix_entries = request;
12639 dd->msix_entries = entries;
12640
12641 if (request != total) {
12642 /* using MSI-X, with reduced interrupts */
12643 dd_dev_err(
12644 dd,
12645 "cannot handle reduced interrupt case, want %u, got %u\n",
12646 total, request);
12647 ret = -EINVAL;
12648 goto fail;
12649 }
12650 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12651 }
12652
12653 /* mask all interrupts */
12654 set_intr_state(dd, 0);
12655 /* clear all pending interrupts */
12656 clear_all_interrupts(dd);
12657
12658 /* reset general handler mask, chip MSI-X mappings */
12659 reset_interrupts(dd);
12660
12661 if (single_interrupt)
12662 ret = request_intx_irq(dd);
12663 else
12664 ret = request_msix_irqs(dd);
12665 if (ret)
12666 goto fail;
12667
12668 return 0;
12669
12670fail:
12671 clean_up_interrupts(dd);
12672 return ret;
12673}
12674
12675/*
12676 * Set up context values in dd. Sets:
12677 *
12678 * num_rcv_contexts - number of contexts being used
12679 * n_krcv_queues - number of kernel contexts
12680 * first_user_ctxt - first non-kernel context in array of contexts
12681 * freectxts - number of free user contexts
12682 * num_send_contexts - number of PIO send contexts being used
12683 */
12684static int set_up_context_variables(struct hfi1_devdata *dd)
12685{
12686 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012687 int total_contexts;
12688 int ret;
12689 unsigned ngroups;
12690
12691 /*
12692 * Kernel contexts: (to be fixed later):
12693 * - min or 2 or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012694 * - Context 0 - control context (VL15/multicast/error)
12695 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012696 */
12697 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012698 /*
12699 * Don't count context 0 in n_krcvqs since
12700 * it isn't used for normal verbs traffic.
12701 *
12702 * krcvqs will reflect number of kernel
12703 * receive contexts above 0.
12704 */
12705 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012706 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012707 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012708 num_kernel_contexts =
12709 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12710 /*
12711 * Every kernel receive context needs an ACK send context.
12712 * One send context is allocated for each VL{0-7} and VL15.
12713 */
12714 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12715 dd_dev_err(dd,
12716 "Reducing # kernel rcv contexts to: %d, from %d\n",
12717 (int)(dd->chip_send_contexts - num_vls - 1),
12718 (int)num_kernel_contexts);
12719 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12720 }
12721 /*
12722 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012723 * - default to 1 user context per CPU if num_user_contexts is
12724 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012725 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012726 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012727 num_user_contexts = num_online_cpus();
12728
12729 total_contexts = num_kernel_contexts + num_user_contexts;
12730
12731 /*
12732 * Adjust the counts given a global max.
12733 */
12734 if (total_contexts > dd->chip_rcv_contexts) {
12735 dd_dev_err(dd,
12736 "Reducing # user receive contexts to: %d, from %d\n",
12737 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12738 (int)num_user_contexts);
12739 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12740 /* recalculate */
12741 total_contexts = num_kernel_contexts + num_user_contexts;
12742 }
12743
12744 /* the first N are kernel contexts, the rest are user contexts */
12745 dd->num_rcv_contexts = total_contexts;
12746 dd->n_krcv_queues = num_kernel_contexts;
12747 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012748 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012749 dd->freectxts = num_user_contexts;
12750 dd_dev_info(dd,
12751 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12752 (int)dd->chip_rcv_contexts,
12753 (int)dd->num_rcv_contexts,
12754 (int)dd->n_krcv_queues,
12755 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12756
12757 /*
12758 * Receive array allocation:
12759 * All RcvArray entries are divided into groups of 8. This
12760 * is required by the hardware and will speed up writes to
12761 * consecutive entries by using write-combining of the entire
12762 * cacheline.
12763 *
12764 * The number of groups is evenly divided among all contexts;
12765 * any left-over groups are given to the first N user
12766 * contexts.
12767 */
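	/*
	 * Illustrative numbers: with a 16384-entry RcvArray and 24 contexts,
	 * ngroups = 16384 / 8 = 2048, each context gets 2048 / 24 = 85 groups,
	 * and the remaining 2048 - (24 * 85) = 8 groups become nctxt_extra.
	 */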
12768 dd->rcv_entries.group_size = RCV_INCREMENT;
12769 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12770 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12771 dd->rcv_entries.nctxt_extra = ngroups -
12772 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12773 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12774 dd->rcv_entries.ngroups,
12775 dd->rcv_entries.nctxt_extra);
12776 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12777 MAX_EAGER_ENTRIES * 2) {
12778 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12779 dd->rcv_entries.group_size;
12780 dd_dev_info(dd,
12781 "RcvArray group count too high, change to %u\n",
12782 dd->rcv_entries.ngroups);
12783 dd->rcv_entries.nctxt_extra = 0;
12784 }
12785 /*
12786 * PIO send contexts
12787 */
12788 ret = init_sc_pools_and_sizes(dd);
12789 if (ret >= 0) { /* success */
12790 dd->num_send_contexts = ret;
12791 dd_dev_info(
12792 dd,
12793 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12794 dd->chip_send_contexts,
12795 dd->num_send_contexts,
12796 dd->sc_sizes[SC_KERNEL].count,
12797 dd->sc_sizes[SC_ACK].count,
12798 dd->sc_sizes[SC_USER].count);
12799 ret = 0; /* success */
12800 }
12801
12802 return ret;
12803}
12804
12805/*
12806 * Set the device/port partition key table. The MAD code
12807 * will ensure that, at least, the partial management
12808 * partition key is present in the table.
12809 */
12810static void set_partition_keys(struct hfi1_pportdata *ppd)
12811{
12812 struct hfi1_devdata *dd = ppd->dd;
12813 u64 reg = 0;
12814 int i;
12815
12816 dd_dev_info(dd, "Setting partition keys\n");
12817 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12818 reg |= (ppd->pkeys[i] &
12819 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12820 ((i % 4) *
12821 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12822 /* Each register holds 4 PKey values. */
12823 if ((i % 4) == 3) {
12824 write_csr(dd, RCV_PARTITION_KEY +
12825 ((i - 3) * 2), reg);
12826 reg = 0;
12827 }
12828 }
12829
12830 /* Always enable HW pkeys check when pkeys table is set */
12831 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12832}
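
/*
 * Layout note for the loop above: pkeys are packed four per 64-bit CSR;
 * entries 0-3 are written to RCV_PARTITION_KEY + 0, entries 4-7 to
 * RCV_PARTITION_KEY + 8, and so on ((i - 3) * 2 evaluates to 0, 8, 16, ...
 * at i = 3, 7, 11, ...).
 */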
12833
12834/*
12835 * These CSRs and memories are uninitialized on reset and must be
12836 * written before reading to set the ECC/parity bits.
12837 *
12838 * NOTE: All user context CSRs that are not mmaped write-only
12839 * (e.g. the TID flows) must be initialized even if the driver never
12840 * reads them.
12841 */
12842static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12843{
12844 int i, j;
12845
12846 /* CceIntMap */
12847 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012848 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012849
12850 /* SendCtxtCreditReturnAddr */
12851 for (i = 0; i < dd->chip_send_contexts; i++)
12852 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12853
12854 /* PIO Send buffers */
12855 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080012856 /*
12857 * These are not normally read, and (presently) have no method
12858 * to be read, so are not pre-initialized
12859 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012860
12861 /* RcvHdrAddr */
12862 /* RcvHdrTailAddr */
12863 /* RcvTidFlowTable */
12864 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12865 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12866 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12867 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012868 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012869 }
12870
12871 /* RcvArray */
12872 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012873 write_csr(dd, RCV_ARRAY + (8 * i),
Mike Marciniszyn77241052015-07-30 15:17:43 -040012874 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12875
12876 /* RcvQPMapTable */
12877 for (i = 0; i < 32; i++)
12878 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12879}
12880
12881/*
12882 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12883 */
12884static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12885 u64 ctrl_bits)
12886{
12887 unsigned long timeout;
12888 u64 reg;
12889
12890 /* is the condition present? */
12891 reg = read_csr(dd, CCE_STATUS);
12892 if ((reg & status_bits) == 0)
12893 return;
12894
12895 /* clear the condition */
12896 write_csr(dd, CCE_CTRL, ctrl_bits);
12897
12898 /* wait for the condition to clear */
12899 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12900 while (1) {
12901 reg = read_csr(dd, CCE_STATUS);
12902 if ((reg & status_bits) == 0)
12903 return;
12904 if (time_after(jiffies, timeout)) {
12905 dd_dev_err(dd,
12906 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12907 status_bits, reg & status_bits);
12908 return;
12909 }
12910 udelay(1);
12911 }
12912}
12913
12914/* set CCE CSRs to chip reset defaults */
12915static void reset_cce_csrs(struct hfi1_devdata *dd)
12916{
12917 int i;
12918
12919 /* CCE_REVISION read-only */
12920 /* CCE_REVISION2 read-only */
12921 /* CCE_CTRL - bits clear automatically */
12922 /* CCE_STATUS read-only, use CceCtrl to clear */
12923 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12924 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12925 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12926 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12927 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12928 /* CCE_ERR_STATUS read-only */
12929 write_csr(dd, CCE_ERR_MASK, 0);
12930 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12931 /* CCE_ERR_FORCE leave alone */
12932 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12933 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12934 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12935 /* CCE_PCIE_CTRL leave alone */
12936 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12937 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12938 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12939 CCE_MSIX_TABLE_UPPER_RESETCSR);
12940 }
12941 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12942 /* CCE_MSIX_PBA read-only */
12943 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12944 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12945 }
12946 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12947		write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12948 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12949 /* CCE_INT_STATUS read-only */
12950 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12951 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12952 /* CCE_INT_FORCE leave alone */
12953 /* CCE_INT_BLOCKED read-only */
12954 }
12955 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12956 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12957}
12958
12959/* set ASIC CSRs to chip reset defaults */
12960static void reset_asic_csrs(struct hfi1_devdata *dd)
12961{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012962 int i;
12963
12964 /*
12965 * If the HFIs are shared between separate nodes or VMs,
12966 * then more will need to be done here. One idea is a module
12967 * parameter that returns early, letting the first power-on or
12968 * a known first load do the reset and blocking all others.
12969 */
12970
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012971 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12972 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012973
12974 if (dd->icode != ICODE_FPGA_EMULATION) {
12975 /* emulation does not have an SBus - leave these alone */
12976 /*
12977 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12978 * Notes:
12979 * o The reset is not zero if aimed at the core. See the
12980 * SBus documentation for details.
12981 * o If the SBus firmware has been updated (e.g. by the BIOS),
12982 * will the reset revert that?
12983 */
12984 /* ASIC_CFG_SBUS_REQUEST leave alone */
12985 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12986 }
12987 /* ASIC_SBUS_RESULT read-only */
12988 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12989 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12990 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12991 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012992
12993 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012994 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012995
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012996 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012997 /* ASIC_STS_THERM read-only */
12998 /* ASIC_CFG_RESET leave alone */
12999
13000 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
13001 /* ASIC_PCIE_SD_HOST_STATUS read-only */
13002 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
13003 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
13004 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
13005 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
13006 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
13007 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
13008 for (i = 0; i < 16; i++)
13009 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
13010
13011 /* ASIC_GPIO_IN read-only */
13012 write_csr(dd, ASIC_GPIO_OE, 0);
13013 write_csr(dd, ASIC_GPIO_INVERT, 0);
13014 write_csr(dd, ASIC_GPIO_OUT, 0);
13015 write_csr(dd, ASIC_GPIO_MASK, 0);
13016 /* ASIC_GPIO_STATUS read-only */
13017 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
13018 /* ASIC_GPIO_FORCE leave alone */
13019
13020 /* ASIC_QSFP1_IN read-only */
13021 write_csr(dd, ASIC_QSFP1_OE, 0);
13022 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13023 write_csr(dd, ASIC_QSFP1_OUT, 0);
13024 write_csr(dd, ASIC_QSFP1_MASK, 0);
13025 /* ASIC_QSFP1_STATUS read-only */
13026 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13027 /* ASIC_QSFP1_FORCE leave alone */
13028
13029 /* ASIC_QSFP2_IN read-only */
13030 write_csr(dd, ASIC_QSFP2_OE, 0);
13031 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13032 write_csr(dd, ASIC_QSFP2_OUT, 0);
13033 write_csr(dd, ASIC_QSFP2_MASK, 0);
13034 /* ASIC_QSFP2_STATUS read-only */
13035 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13036 /* ASIC_QSFP2_FORCE leave alone */
13037
13038 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13039 /* this also writes a NOP command, clearing paging mode */
13040 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13041 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013042}
13043
13044/* set MISC CSRs to chip reset defaults */
13045static void reset_misc_csrs(struct hfi1_devdata *dd)
13046{
13047 int i;
13048
13049 for (i = 0; i < 32; i++) {
13050 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13051 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13052 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13053 }
Jubin John4d114fd2016-02-14 20:21:43 -080013054 /*
13055 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13056	 * only be written in 128-byte chunks
13057 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013058 /* init RSA engine to clear lingering errors */
13059 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13060 write_csr(dd, MISC_CFG_RSA_MU, 0);
13061 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13062 /* MISC_STS_8051_DIGEST read-only */
13063 /* MISC_STS_SBM_DIGEST read-only */
13064 /* MISC_STS_PCIE_DIGEST read-only */
13065 /* MISC_STS_FAB_DIGEST read-only */
13066 /* MISC_ERR_STATUS read-only */
13067 write_csr(dd, MISC_ERR_MASK, 0);
13068 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13069 /* MISC_ERR_FORCE leave alone */
13070}
13071
13072/* set TXE CSRs to chip reset defaults */
13073static void reset_txe_csrs(struct hfi1_devdata *dd)
13074{
13075 int i;
13076
13077 /*
13078 * TXE Kernel CSRs
13079 */
13080 write_csr(dd, SEND_CTRL, 0);
13081 __cm_reset(dd, 0); /* reset CM internal state */
13082 /* SEND_CONTEXTS read-only */
13083 /* SEND_DMA_ENGINES read-only */
13084 /* SEND_PIO_MEM_SIZE read-only */
13085 /* SEND_DMA_MEM_SIZE read-only */
13086 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13087 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13088 /* SEND_PIO_ERR_STATUS read-only */
13089 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13090 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13091 /* SEND_PIO_ERR_FORCE leave alone */
13092 /* SEND_DMA_ERR_STATUS read-only */
13093 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13094 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13095 /* SEND_DMA_ERR_FORCE leave alone */
13096 /* SEND_EGRESS_ERR_STATUS read-only */
13097 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13098 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13099 /* SEND_EGRESS_ERR_FORCE leave alone */
13100 write_csr(dd, SEND_BTH_QP, 0);
13101 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13102 write_csr(dd, SEND_SC2VLT0, 0);
13103 write_csr(dd, SEND_SC2VLT1, 0);
13104 write_csr(dd, SEND_SC2VLT2, 0);
13105 write_csr(dd, SEND_SC2VLT3, 0);
13106 write_csr(dd, SEND_LEN_CHECK0, 0);
13107 write_csr(dd, SEND_LEN_CHECK1, 0);
13108 /* SEND_ERR_STATUS read-only */
13109 write_csr(dd, SEND_ERR_MASK, 0);
13110 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13111 /* SEND_ERR_FORCE read-only */
13112 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013113 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013114 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013115 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13116 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13117 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013118 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013119 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013120 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013121 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013122 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13123 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13124 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13125 /* SEND_CM_CREDIT_USED_STATUS read-only */
13126 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13127 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13128 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13129 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13130 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13131 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013132 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013133 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13134 /* SEND_CM_CREDIT_USED_VL read-only */
13135 /* SEND_CM_CREDIT_USED_VL15 read-only */
13136 /* SEND_EGRESS_CTXT_STATUS read-only */
13137 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13138 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13139 /* SEND_EGRESS_ERR_INFO read-only */
13140 /* SEND_EGRESS_ERR_SOURCE read-only */
13141
13142 /*
13143 * TXE Per-Context CSRs
13144 */
13145 for (i = 0; i < dd->chip_send_contexts; i++) {
13146 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13147 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13148 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13149 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13150 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13151 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13152 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13153 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13154 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13155 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13156 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13157 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13158 }
13159
13160 /*
13161 * TXE Per-SDMA CSRs
13162 */
13163 for (i = 0; i < dd->chip_sdma_engines; i++) {
13164 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13165 /* SEND_DMA_STATUS read-only */
13166 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13167 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13168 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13169 /* SEND_DMA_HEAD read-only */
13170 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13171 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13172 /* SEND_DMA_IDLE_CNT read-only */
13173 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13174 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13175 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13176 /* SEND_DMA_ENG_ERR_STATUS read-only */
13177 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13178 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13179 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13180 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13181 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13182 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13183 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13184 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13185 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13186 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13187 }
13188}
13189
13190/*
13191 * Expect on entry:
13192 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13193 */
13194static void init_rbufs(struct hfi1_devdata *dd)
13195{
13196 u64 reg;
13197 int count;
13198
13199 /*
13200 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13201 * clear.
13202 */
13203 count = 0;
13204 while (1) {
13205 reg = read_csr(dd, RCV_STATUS);
13206 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13207 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13208 break;
13209 /*
13210 * Give up after 1ms - maximum wait time.
13211 *
13212 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13213 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13214 * 148 KB / (66% * 250MB/s) = 920us
13215 */
13216 if (count++ > 500) {
13217 dd_dev_err(dd,
13218 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13219 __func__, reg);
13220 break;
13221 }
13222 udelay(2); /* do not busy-wait the CSR */
13223 }
13224
13225 /* start the init - expect RcvCtrl to be 0 */
13226 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13227
13228 /*
13229	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13230 * period after the write before RcvStatus.RxRbufInitDone is valid.
13231 * The delay in the first run through the loop below is sufficient and
13232 * required before the first read of RcvStatus.RxRbufInintDone.
13233 */
13234 read_csr(dd, RCV_CTRL);
13235
13236 /* wait for the init to finish */
13237 count = 0;
13238 while (1) {
13239 /* delay is required first time through - see above */
13240 udelay(2); /* do not busy-wait the CSR */
13241 reg = read_csr(dd, RCV_STATUS);
13242 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13243 break;
13244
13245 /* give up after 100us - slowest possible at 33MHz is 73us */
13246 if (count++ > 50) {
13247 dd_dev_err(dd,
13248 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13249 __func__);
13250 break;
13251 }
13252 }
13253}
13254
13255/* set RXE CSRs to chip reset defaults */
13256static void reset_rxe_csrs(struct hfi1_devdata *dd)
13257{
13258 int i, j;
13259
13260 /*
13261 * RXE Kernel CSRs
13262 */
13263 write_csr(dd, RCV_CTRL, 0);
13264 init_rbufs(dd);
13265 /* RCV_STATUS read-only */
13266 /* RCV_CONTEXTS read-only */
13267 /* RCV_ARRAY_CNT read-only */
13268 /* RCV_BUF_SIZE read-only */
13269 write_csr(dd, RCV_BTH_QP, 0);
13270 write_csr(dd, RCV_MULTICAST, 0);
13271 write_csr(dd, RCV_BYPASS, 0);
13272 write_csr(dd, RCV_VL15, 0);
13273 /* this is a clear-down */
13274 write_csr(dd, RCV_ERR_INFO,
13275 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13276 /* RCV_ERR_STATUS read-only */
13277 write_csr(dd, RCV_ERR_MASK, 0);
13278 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13279 /* RCV_ERR_FORCE leave alone */
13280 for (i = 0; i < 32; i++)
13281 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13282 for (i = 0; i < 4; i++)
13283 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13284 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13285 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13286 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13287 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13288 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13289 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13290 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13291 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13292 }
13293 for (i = 0; i < 32; i++)
13294 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13295
13296 /*
13297 * RXE Kernel and User Per-Context CSRs
13298 */
13299 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13300 /* kernel */
13301 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13302 /* RCV_CTXT_STATUS read-only */
13303 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13304 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13305 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13306 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13307 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13308 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13309 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13310 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13311 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13312 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13313
13314 /* user */
13315 /* RCV_HDR_TAIL read-only */
13316 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13317 /* RCV_EGR_INDEX_TAIL read-only */
13318 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13319 /* RCV_EGR_OFFSET_TAIL read-only */
13320 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13321 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13322 0);
13323 }
13324 }
13325}
13326
13327/*
13328 * Set sc2vl tables.
13329 *
13330 * They power on to zeros, so to avoid send context errors
13331 * they need to be set:
13332 *
13333 * SC 0-7 -> VL 0-7 (respectively)
13334 * SC 15 -> VL 15
13335 * otherwise
13336 * -> VL 0
13337 */
13338static void init_sc2vl_tables(struct hfi1_devdata *dd)
13339{
13340 int i;
13341 /* init per architecture spec, constrained by hardware capability */
13342
13343 /* HFI maps sent packets */
13344 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13345 0,
13346 0, 0, 1, 1,
13347 2, 2, 3, 3,
13348 4, 4, 5, 5,
13349 6, 6, 7, 7));
13350 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13351 1,
13352 8, 0, 9, 0,
13353 10, 0, 11, 0,
13354 12, 0, 13, 0,
13355 14, 0, 15, 15));
13356 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13357 2,
13358 16, 0, 17, 0,
13359 18, 0, 19, 0,
13360 20, 0, 21, 0,
13361 22, 0, 23, 0));
13362 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13363 3,
13364 24, 0, 25, 0,
13365 26, 0, 27, 0,
13366 28, 0, 29, 0,
13367 30, 0, 31, 0));
13368
13369 /* DC maps received packets */
13370 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13371 15_0,
13372 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13373 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13374 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13375 31_16,
13376 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13377 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13378
13379 /* initialize the cached sc2vl values consistently with h/w */
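	/* dd->sc2vl is viewed here as a flat array of 32 bytes, one entry per SC */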
13380 for (i = 0; i < 32; i++) {
13381 if (i < 8 || i == 15)
13382 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13383 else
13384 *((u8 *)(dd->sc2vl) + i) = 0;
13385 }
13386}
13387
13388/*
13389 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13390 * depend on the chip going through a power-on reset - a driver may be loaded
13391 * and unloaded many times.
13392 *
13393 * Do not write any CSR values to the chip in this routine - there may be
13394 * a reset following the (possible) FLR in this routine.
13395 *
13396 */
13397static void init_chip(struct hfi1_devdata *dd)
13398{
13399 int i;
13400
13401 /*
13402 * Put the HFI CSRs in a known state.
13403 * Combine this with a DC reset.
13404 *
13405 * Stop the device from doing anything while we do a
13406 * reset. We know there are no other active users of
13407 * the device since we are now in charge. Turn off
13408	 * all outbound and inbound traffic and make sure
13409 * the device does not generate any interrupts.
13410 */
13411
13412 /* disable send contexts and SDMA engines */
13413 write_csr(dd, SEND_CTRL, 0);
13414 for (i = 0; i < dd->chip_send_contexts; i++)
13415 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13416 for (i = 0; i < dd->chip_sdma_engines; i++)
13417 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13418 /* disable port (turn off RXE inbound traffic) and contexts */
13419 write_csr(dd, RCV_CTRL, 0);
13420 for (i = 0; i < dd->chip_rcv_contexts; i++)
13421		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13422 /* mask all interrupt sources */
13423 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013424 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013425
13426 /*
13427 * DC Reset: do a full DC reset before the register clear.
13428 * A recommended length of time to hold is one CSR read,
13429 * so reread the CceDcCtrl. Then, hold the DC in reset
13430 * across the clear.
13431 */
13432 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013433 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013434
13435 if (use_flr) {
13436 /*
13437 * A FLR will reset the SPC core and part of the PCIe.
13438 * The parts that need to be restored have already been
13439 * saved.
13440 */
13441 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13442
13443 /* do the FLR, the DC reset will remain */
13444 hfi1_pcie_flr(dd);
13445
13446 /* restore command and BARs */
13447 restore_pci_variables(dd);
13448
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013449 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013450 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13451 hfi1_pcie_flr(dd);
13452 restore_pci_variables(dd);
13453 }
13454
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013455 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013456 } else {
13457 dd_dev_info(dd, "Resetting CSRs with writes\n");
13458 reset_cce_csrs(dd);
13459 reset_txe_csrs(dd);
13460 reset_rxe_csrs(dd);
13461 reset_asic_csrs(dd);
13462 reset_misc_csrs(dd);
13463 }
13464 /* clear the DC reset */
13465 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013466
Mike Marciniszyn77241052015-07-30 15:17:43 -040013467 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013468 setextled(dd, 0);
13469
Mike Marciniszyn77241052015-07-30 15:17:43 -040013470 /*
13471 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013472 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013473 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013474	 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013475 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013476 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013477 * I2CCLK and I2CDAT will change per direction, and INT_N and
13478 * MODPRS_N are input only and their value is ignored.
13479 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013480 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13481 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013482}
13483
13484static void init_early_variables(struct hfi1_devdata *dd)
13485{
13486 int i;
13487
13488 /* assign link credit variables */
13489 dd->vau = CM_VAU;
13490 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013491 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013492 dd->link_credits--;
13493 dd->vcu = cu_to_vcu(hfi1_cu);
13494 /* enough room for 8 MAD packets plus header - 17K */
13495 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13496 if (dd->vl15_init > dd->link_credits)
13497 dd->vl15_init = dd->link_credits;
13498
13499 write_uninitialized_csrs_and_memories(dd);
13500
13501 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13502 for (i = 0; i < dd->num_pports; i++) {
13503 struct hfi1_pportdata *ppd = &dd->pport[i];
13504
13505 set_partition_keys(ppd);
13506 }
13507 init_sc2vl_tables(dd);
13508}
13509
13510static void init_kdeth_qp(struct hfi1_devdata *dd)
13511{
13512 /* user changed the KDETH_QP */
13513 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13514 /* out of range or illegal value */
13515 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13516 kdeth_qp = 0;
13517 }
13518 if (kdeth_qp == 0) /* not set, or failed range check */
13519 kdeth_qp = DEFAULT_KDETH_QP;
13520
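	/*
	 * Program the same KDETH prefix into both the send and receive
	 * BTH QP CSRs.
	 */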
13521 write_csr(dd, SEND_BTH_QP,
13522 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13523 << SEND_BTH_QP_KDETH_QP_SHIFT);
13524
13525 write_csr(dd, RCV_BTH_QP,
13526 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13527 << RCV_BTH_QP_KDETH_QP_SHIFT);
13528}
13529
13530/**
13531 * init_qpmap_table
13532 * @dd - device data
13533 * @first_ctxt - first context
13534 * @last_ctxt - last context
13535 *
13536 * This routine sets the qpn mapping table that
13537 * is indexed by qpn[8:1].
13538 *
13539 * The routine will round robin the 256 settings
13540 * from first_ctxt to last_ctxt.
13541 *
13542 * The first/last looks ahead to having specialized
13543 * receive contexts for mgmt and bypass. Normal
13544 * verbs traffic is assumed to be on a range
13545 * of receive contexts.
13546 */
13547static void init_qpmap_table(struct hfi1_devdata *dd,
13548 u32 first_ctxt,
13549 u32 last_ctxt)
13550{
13551 u64 reg = 0;
13552 u64 regno = RCV_QP_MAP_TABLE;
13553 int i;
13554 u64 ctxt = first_ctxt;
13555
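	/*
	 * Each RcvQpMapTable CSR holds eight 8-bit entries, so a full
	 * register is accumulated and flushed every 8 iterations.
	 */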
13556 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013557 reg |= ctxt << (8 * (i % 8));
13558 i++;
13559 ctxt++;
13560 if (ctxt > last_ctxt)
13561 ctxt = first_ctxt;
13562 if (i % 8 == 0) {
13563 write_csr(dd, regno, reg);
13564 reg = 0;
13565 regno += 8;
13566 }
13567 }
13568 if (i % 8)
13569 write_csr(dd, regno, reg);
13570
13571 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13572 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13573}
13574
13575/**
13576 * init_qos - init RX qos
13577 * @dd - device data
13578 * @first_ctxt - first context to use
13579 *
13580 * This routine initializes Rule 0 and the
13581 * RSM map table to implement qos.
13582 *
13583 * If all of the limit tests succeed,
13584 * qos is applied based on the array
13585 * interpretation of krcvqs where
13586 * entry 0 is VL0.
13587 *
13588 * The number of vl bits (n) and the number of qpn
13589 * bits (m) are computed to feed both the RSM map table
13590 * and the single rule.
13591 *
13592 */
13593static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13594{
13595 u8 max_by_vl = 0;
13596 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13597 u64 *rsmmap;
13598 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013599 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013600
13601 /* validate */
13602 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13603 num_vls == 1 ||
13604 krcvqsset <= 1)
13605 goto bail;
13606 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13607 if (krcvqs[i] > max_by_vl)
13608 max_by_vl = krcvqs[i];
13609 if (max_by_vl > 32)
13610 goto bail;
13611 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13612 /* determine bits vl */
13613 n = ilog2(num_vls);
13614 /* determine bits for qpn */
13615 m = ilog2(qpns_per_vl);
13616 if ((m + n) > 7)
13617 goto bail;
13618 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13619 goto bail;
13620 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013621 if (!rsmmap)
13622 goto bail;
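	/* pre-fill the map with the default context marker (0xff, or 0 on A0) */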
Mike Marciniszyn77241052015-07-30 15:17:43 -040013623 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13624 /* init the local copy of the table */
13625 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13626 unsigned tctxt;
13627
13628 for (qpn = 0, tctxt = ctxt;
13629 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13630 unsigned idx, regoff, regidx;
13631
13632			/* generate index < 128 (m + n is at most 7 bits) */
13633 idx = (qpn << n) ^ i;
13634 regoff = (idx % 8) * 8;
13635 regidx = idx / 8;
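			/*
			 * Each RSM map table CSR holds eight 8-bit entries:
			 * regidx selects the CSR, regoff the bit offset of
			 * the entry within it.
			 */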
13636 reg = rsmmap[regidx];
13637 /* replace 0xff with context number */
13638 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13639 << regoff);
13640 reg |= (u64)(tctxt++) << regoff;
13641 rsmmap[regidx] = reg;
13642 if (tctxt == ctxt + krcvqs[i])
13643 tctxt = ctxt;
13644 }
13645 ctxt += krcvqs[i];
13646 }
13647 /* flush cached copies to chip */
13648 for (i = 0; i < NUM_MAP_REGS; i++)
13649 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13650 /* add rule0 */
13651 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13652 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13653 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13654 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13655 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13656 LRH_BTH_MATCH_OFFSET
13657 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13658 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13659 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13660 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13661 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13662 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13663 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13664 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13665 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13666 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13667 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13668 /* Enable RSM */
13669 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13670 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013671 /* map everything else to first context */
13672 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013673 dd->qos_shift = n + 1;
13674 return;
13675bail:
13676 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013677 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013678}
13679
13680static void init_rxe(struct hfi1_devdata *dd)
13681{
13682 /* enable all receive errors */
13683 write_csr(dd, RCV_ERR_MASK, ~0ull);
13684 /* setup QPN map table - start where VL15 context leaves off */
13685 init_qos(
13686 dd,
13687 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13688 /*
13689 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13690 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13691 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13692 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13693 * Max_PayLoad_Size set to its minimum of 128.
13694 *
13695 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13696 * (64 bytes). Max_Payload_Size is possibly modified upward in
13697 * tune_pcie_caps() which is called after this routine.
13698 */
13699}
13700
13701static void init_other(struct hfi1_devdata *dd)
13702{
13703 /* enable all CCE errors */
13704 write_csr(dd, CCE_ERR_MASK, ~0ull);
13705 /* enable *some* Misc errors */
13706 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13707 /* enable all DC errors, except LCB */
13708 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13709 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13710}
13711
13712/*
13713 * Fill out the given AU table using the given CU. A CU is defined in terms
13714 * of AUs. The table is an encoding: given the index, how many AUs does that
13715 * represent?
13716 *
13717 * NOTE: Assumes that the register layout is the same for the
13718 * local and remote tables.
13719 */
13720static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13721 u32 csr0to3, u32 csr4to7)
13722{
13723 write_csr(dd, csr0to3,
13724 0ull <<
13725 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13726 | 1ull <<
13727 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13728 | 2ull * cu <<
13729 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13730 | 4ull * cu <<
13731 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13732 write_csr(dd, csr4to7,
13733 8ull * cu <<
13734 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13735 | 16ull * cu <<
13736 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13737 | 32ull * cu <<
13738 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13739 | 64ull * cu <<
13740 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013741}
13742
13743static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13744{
13745 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13746 SEND_CM_LOCAL_AU_TABLE4_TO7);
13747}
13748
13749void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13750{
13751 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13752 SEND_CM_REMOTE_AU_TABLE4_TO7);
13753}
13754
13755static void init_txe(struct hfi1_devdata *dd)
13756{
13757 int i;
13758
13759 /* enable all PIO, SDMA, general, and Egress errors */
13760 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13761 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13762 write_csr(dd, SEND_ERR_MASK, ~0ull);
13763 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13764
13765 /* enable all per-context and per-SDMA engine errors */
13766 for (i = 0; i < dd->chip_send_contexts; i++)
13767 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13768 for (i = 0; i < dd->chip_sdma_engines; i++)
13769 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13770
13771 /* set the local CU to AU mapping */
13772 assign_local_cm_au_table(dd, dd->vcu);
13773
13774 /*
13775 * Set reasonable default for Credit Return Timer
13776 * Don't set on Simulator - causes it to choke.
13777 */
13778 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13779 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13780}
13781
13782int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13783{
13784 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13785 unsigned sctxt;
13786 int ret = 0;
13787 u64 reg;
13788
13789 if (!rcd || !rcd->sc) {
13790 ret = -EINVAL;
13791 goto done;
13792 }
13793 sctxt = rcd->sc->hw_context;
13794 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13795 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13796 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13797 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13798 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13799 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13800 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13801 /*
13802 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013803 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013804 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013805 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13806 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13807 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13808 }
13809
13810 /* Enable J_KEY check on receive context. */
13811 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13812 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13813 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13814 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13815done:
13816 return ret;
13817}
13818
13819int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13820{
13821 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13822 unsigned sctxt;
13823 int ret = 0;
13824 u64 reg;
13825
13826 if (!rcd || !rcd->sc) {
13827 ret = -EINVAL;
13828 goto done;
13829 }
13830 sctxt = rcd->sc->hw_context;
13831 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13832 /*
13833 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13834 * This check would not have been enabled for A0 h/w, see
13835 * set_ctxt_jkey().
13836 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013837 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013838 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13839 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13840 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13841 }
13842 /* Turn off the J_KEY on the receive side */
13843 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13844done:
13845 return ret;
13846}
13847
13848int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13849{
13850 struct hfi1_ctxtdata *rcd;
13851 unsigned sctxt;
13852 int ret = 0;
13853 u64 reg;
13854
13855 if (ctxt < dd->num_rcv_contexts)
13856 rcd = dd->rcd[ctxt];
13857 else {
13858 ret = -EINVAL;
13859 goto done;
13860 }
13861 if (!rcd || !rcd->sc) {
13862 ret = -EINVAL;
13863 goto done;
13864 }
13865 sctxt = rcd->sc->hw_context;
13866 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13867 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13868 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13869 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13870 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13871 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13872done:
13873 return ret;
13874}
13875
13876int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13877{
13878 struct hfi1_ctxtdata *rcd;
13879 unsigned sctxt;
13880 int ret = 0;
13881 u64 reg;
13882
13883 if (ctxt < dd->num_rcv_contexts)
13884 rcd = dd->rcd[ctxt];
13885 else {
13886 ret = -EINVAL;
13887 goto done;
13888 }
13889 if (!rcd || !rcd->sc) {
13890 ret = -EINVAL;
13891 goto done;
13892 }
13893 sctxt = rcd->sc->hw_context;
13894 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13895 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13896 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13897 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13898done:
13899 return ret;
13900}
13901
13902/*
13903 * Start cleaning up the chip. Our clean up happens in multiple
13904 * stages and this is just the first.
13905 */
13906void hfi1_start_cleanup(struct hfi1_devdata *dd)
13907{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013908 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013909 free_cntrs(dd);
13910 free_rcverr(dd);
13911 clean_up_interrupts(dd);
13912}
13913
13914#define HFI_BASE_GUID(dev) \
13915 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13916
13917/*
13918 * Certain chip functions need to be initialized only once per asic
13919 * instead of per-device. This function finds the peer device and
13920 * checks whether that chip initialization needs to be done by this
13921 * device.
13922 */
13923static void asic_should_init(struct hfi1_devdata *dd)
13924{
13925 unsigned long flags;
13926 struct hfi1_devdata *tmp, *peer = NULL;
13927
13928 spin_lock_irqsave(&hfi1_devs_lock, flags);
13929 /* Find our peer device */
13930 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13931 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13932 dd->unit != tmp->unit) {
13933 peer = tmp;
13934 break;
13935 }
13936 }
13937
13938 /*
13939 * "Claim" the ASIC for initialization if it hasn't been
13940 " "claimed" yet.
13941 */
13942 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13943 dd->flags |= HFI1_DO_INIT_ASIC;
13944 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13945}
13946
Dean Luick5d9157a2015-11-16 21:59:34 -050013947/*
13948 * Set dd->boardname. Use a generic name if a name is not returned from
13949 * EFI variable space.
13950 *
13951 * Return 0 on success, -ENOMEM if space could not be allocated.
13952 */
13953static int obtain_boardname(struct hfi1_devdata *dd)
13954{
13955 /* generic board description */
13956 const char generic[] =
13957 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13958 unsigned long size;
13959 int ret;
13960
13961 ret = read_hfi1_efi_var(dd, "description", &size,
13962 (void **)&dd->boardname);
13963 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080013964 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050013965 /* use generic description */
13966 dd->boardname = kstrdup(generic, GFP_KERNEL);
13967 if (!dd->boardname)
13968 return -ENOMEM;
13969 }
13970 return 0;
13971}
13972
Mike Marciniszyn77241052015-07-30 15:17:43 -040013973/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013974 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013975 * @dev: the pci_dev for hfi1_ib device
13976 * @ent: pci_device_id struct for this dev
13977 *
13978 * Also allocates, initializes, and returns the devdata struct for this
13979 * device instance
13980 *
13981 * This is global, and is called directly at init to set up the
13982 * chip-specific function pointers for later use.
13983 */
13984struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13985 const struct pci_device_id *ent)
13986{
13987 struct hfi1_devdata *dd;
13988 struct hfi1_pportdata *ppd;
13989 u64 reg;
13990 int i, ret;
13991 static const char * const inames[] = { /* implementation names */
13992 "RTL silicon",
13993 "RTL VCS simulation",
13994 "RTL FPGA emulation",
13995 "Functional simulator"
13996 };
13997
13998 dd = hfi1_alloc_devdata(pdev,
13999 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
14000 if (IS_ERR(dd))
14001 goto bail;
14002 ppd = dd->pport;
14003 for (i = 0; i < dd->num_pports; i++, ppd++) {
14004 int vl;
14005 /* init common fields */
14006 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14007 /* DC supports 4 link widths */
14008 ppd->link_width_supported =
14009 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14010 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14011 ppd->link_width_downgrade_supported =
14012 ppd->link_width_supported;
14013 /* start out enabling only 4X */
14014 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14015 ppd->link_width_downgrade_enabled =
14016 ppd->link_width_downgrade_supported;
14017 /* link width active is 0 when link is down */
14018 /* link width downgrade active is 0 when link is down */
14019
Jubin Johnd0d236e2016-02-14 20:20:15 -080014020 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14021 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014022 hfi1_early_err(&pdev->dev,
14023 "Invalid num_vls %u, using %u VLs\n",
14024 num_vls, HFI1_MAX_VLS_SUPPORTED);
14025 num_vls = HFI1_MAX_VLS_SUPPORTED;
14026 }
14027 ppd->vls_supported = num_vls;
14028 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014029 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014030 /* Set the default MTU. */
14031 for (vl = 0; vl < num_vls; vl++)
14032 dd->vld[vl].mtu = hfi1_max_mtu;
14033 dd->vld[15].mtu = MAX_MAD_PACKET;
14034 /*
14035 * Set the initial values to reasonable default, will be set
14036 * for real when link is up.
14037 */
14038 ppd->lstate = IB_PORT_DOWN;
14039 ppd->overrun_threshold = 0x4;
14040 ppd->phy_error_threshold = 0xf;
14041 ppd->port_crc_mode_enabled = link_crc_mask;
14042 /* initialize supported LTP CRC mode */
14043 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14044 /* initialize enabled LTP CRC mode */
14045 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14046 /* start in offline */
14047 ppd->host_link_state = HLS_DN_OFFLINE;
14048 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014049 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014050 }
14051
14052 dd->link_default = HLS_DN_POLL;
14053
14054 /*
14055 * Do remaining PCIe setup and save PCIe values in dd.
14056 * Any error printing is already done by the init code.
14057 * On return, we have the chip mapped.
14058 */
14059 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14060 if (ret < 0)
14061 goto bail_free;
14062
14063 /* verify that reads actually work, save revision for reset check */
14064 dd->revision = read_csr(dd, CCE_REVISION);
14065 if (dd->revision == ~(u64)0) {
14066 dd_dev_err(dd, "cannot read chip CSRs\n");
14067 ret = -EINVAL;
14068 goto bail_cleanup;
14069 }
14070 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14071 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14072 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14073 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14074
Jubin John4d114fd2016-02-14 20:21:43 -080014075 /*
14076 * obtain the hardware ID - NOT related to unit, which is a
14077 * software enumeration
14078 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014079 reg = read_csr(dd, CCE_REVISION2);
14080 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14081 & CCE_REVISION2_HFI_ID_MASK;
14082 /* the variable size will remove unwanted bits */
14083 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14084 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14085 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14086 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
14087 (int)dd->irev);
14088
14089 /* speeds the hardware can support */
14090 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14091 /* speeds allowed to run at */
14092 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14093 /* give a reasonable active value, will be set on link up */
14094 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14095
14096 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14097 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14098 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14099 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14100 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14101 /* fix up link widths for emulation _p */
14102 ppd = dd->pport;
14103 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14104 ppd->link_width_supported =
14105 ppd->link_width_enabled =
14106 ppd->link_width_downgrade_supported =
14107 ppd->link_width_downgrade_enabled =
14108 OPA_LINK_WIDTH_1X;
14109 }
14110	/* ensure num_vls isn't larger than the number of SDMA engines */
14111 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14112 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014113 num_vls, dd->chip_sdma_engines);
14114 num_vls = dd->chip_sdma_engines;
14115 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014116 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014117 }
14118
14119 /*
14120 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14121 * Limit the max if larger than the field holds. If timeout is
14122 * non-zero, then the calculated field will be at least 1.
14123 *
14124 * Must be after icode is set up - the cclock rate depends
14125 * on knowing the hardware being used.
14126 */
14127 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14128 if (dd->rcv_intr_timeout_csr >
14129 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14130 dd->rcv_intr_timeout_csr =
14131 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14132 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14133 dd->rcv_intr_timeout_csr = 1;
14134
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014135 /* needs to be done before we look for the peer device */
14136 read_guid(dd);
14137
14138 /* should this device init the ASIC block? */
14139 asic_should_init(dd);
14140
Mike Marciniszyn77241052015-07-30 15:17:43 -040014141 /* obtain chip sizes, reset chip CSRs */
14142 init_chip(dd);
14143
14144 /* read in the PCIe link speed information */
14145 ret = pcie_speeds(dd);
14146 if (ret)
14147 goto bail_cleanup;
14148
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014149 /* Needs to be called before hfi1_firmware_init */
14150 get_platform_config(dd);
14151
Mike Marciniszyn77241052015-07-30 15:17:43 -040014152 /* read in firmware */
14153 ret = hfi1_firmware_init(dd);
14154 if (ret)
14155 goto bail_cleanup;
14156
14157 /*
14158 * In general, the PCIe Gen3 transition must occur after the
14159 * chip has been idled (so it won't initiate any PCIe transactions
14160 * e.g. an interrupt) and before the driver changes any registers
14161 * (the transition will reset the registers).
14162 *
14163 * In particular, place this call after:
14164 * - init_chip() - the chip will not initiate any PCIe transactions
14165 * - pcie_speeds() - reads the current link speed
14166 * - hfi1_firmware_init() - the needed firmware is ready to be
14167 * downloaded
14168 */
14169 ret = do_pcie_gen3_transition(dd);
14170 if (ret)
14171 goto bail_cleanup;
14172
14173 /* start setting dd values and adjusting CSRs */
14174 init_early_variables(dd);
14175
14176 parse_platform_config(dd);
14177
Dean Luick5d9157a2015-11-16 21:59:34 -050014178 ret = obtain_boardname(dd);
14179 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014180 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014181
14182 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014183 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014184 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014185 (u32)dd->majrev,
14186 (u32)dd->minrev,
14187 (dd->revision >> CCE_REVISION_SW_SHIFT)
14188 & CCE_REVISION_SW_MASK);
14189
14190 ret = set_up_context_variables(dd);
14191 if (ret)
14192 goto bail_cleanup;
14193
14194 /* set initial RXE CSRs */
14195 init_rxe(dd);
14196 /* set initial TXE CSRs */
14197 init_txe(dd);
14198 /* set initial non-RXE, non-TXE CSRs */
14199 init_other(dd);
14200 /* set up KDETH QP prefix in both RX and TX CSRs */
14201 init_kdeth_qp(dd);
14202
Mitko Haralanov957558c2016-02-03 14:33:40 -080014203 ret = hfi1_dev_affinity_init(dd);
14204 if (ret)
14205 goto bail_cleanup;
14206
Mike Marciniszyn77241052015-07-30 15:17:43 -040014207 /* send contexts must be set up before receive contexts */
14208 ret = init_send_contexts(dd);
14209 if (ret)
14210 goto bail_cleanup;
14211
14212 ret = hfi1_create_ctxts(dd);
14213 if (ret)
14214 goto bail_cleanup;
14215
14216 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14217 /*
14218 * rcd[0] is guaranteed to be valid by this point. Also, all
14219	 * contexts are using the same value, as per the module parameter.
14220 */
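	/*
	 * sizeof(u64) / sizeof(u32) is 2: the 64-bit RHF occupies the last
	 * two DWs of each rcvhdrq entry.
	 */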
14221 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14222
14223 ret = init_pervl_scs(dd);
14224 if (ret)
14225 goto bail_cleanup;
14226
14227 /* sdma init */
14228 for (i = 0; i < dd->num_pports; ++i) {
14229 ret = sdma_init(dd, i);
14230 if (ret)
14231 goto bail_cleanup;
14232 }
14233
14234 /* use contexts created by hfi1_create_ctxts */
14235 ret = set_up_interrupts(dd);
14236 if (ret)
14237 goto bail_cleanup;
14238
14239 /* set up LCB access - must be after set_up_interrupts() */
14240 init_lcb_access(dd);
14241
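	/*
	 * The serial number comes from the low 24 bits of the base GUID;
	 * the OUI bytes below come from its top three bytes.
	 */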
14242 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14243 dd->base_guid & 0xFFFFFF);
14244
14245 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14246 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14247 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14248
14249 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14250 if (ret)
14251 goto bail_clear_intr;
14252 check_fabric_firmware_versions(dd);
14253
14254 thermal_init(dd);
14255
14256 ret = init_cntrs(dd);
14257 if (ret)
14258 goto bail_clear_intr;
14259
14260 ret = init_rcverr(dd);
14261 if (ret)
14262 goto bail_free_cntrs;
14263
14264 ret = eprom_init(dd);
14265 if (ret)
14266 goto bail_free_rcverr;
14267
14268 goto bail;
14269
14270bail_free_rcverr:
14271 free_rcverr(dd);
14272bail_free_cntrs:
14273 free_cntrs(dd);
14274bail_clear_intr:
14275 clean_up_interrupts(dd);
14276bail_cleanup:
14277 hfi1_pcie_ddcleanup(dd);
14278bail_free:
14279 hfi1_free_devdata(dd);
14280 dd = ERR_PTR(ret);
14281bail:
14282 return dd;
14283}
14284
14285static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14286 u32 dw_len)
14287{
14288 u32 delta_cycles;
14289 u32 current_egress_rate = ppd->current_egress_rate;
14290 /* rates here are in units of 10^6 bits/sec */
14291
14292 if (desired_egress_rate == -1)
14293 return 0; /* shouldn't happen */
14294
14295 if (desired_egress_rate >= current_egress_rate)
14296 return 0; /* we can't help go faster, only slower */
14297
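	/*
	 * Extra delay = cycles to egress dw_len * 4 bytes at the desired
	 * (slower) rate minus the cycles at the current rate.
	 */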
14298 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14299 egress_cycles(dw_len * 4, current_egress_rate);
14300
14301 return (u16)delta_cycles;
14302}
14303
Mike Marciniszyn77241052015-07-30 15:17:43 -040014304/**
14305 * create_pbc - build a pbc for transmission
14306 * @flags: special case flags or-ed in built pbc
14307 * @srate: static rate
14308 * @vl: vl
14309 * @dwlen: dword length (header words + data words + pbc words)
14310 *
14311 * Create a PBC with the given flags, rate, VL, and length.
14312 *
14313 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14314 * for verbs, which does not use this PSM feature. The lone other caller
14315 * is for the diagnostic interface which calls this if the user does not
14316 * supply their own PBC.
14317 */
14318u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14319 u32 dw_len)
14320{
14321 u64 pbc, delay = 0;
14322
14323 if (unlikely(srate_mbs))
14324 delay = delay_cycles(ppd, srate_mbs, dw_len);
14325
14326 pbc = flags
14327 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14328 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14329 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14330 | (dw_len & PBC_LENGTH_DWS_MASK)
14331 << PBC_LENGTH_DWS_SHIFT;
14332
14333 return pbc;
14334}
14335
14336#define SBUS_THERMAL 0x4f
14337#define SBUS_THERM_MONITOR_MODE 0x1
14338
14339#define THERM_FAILURE(dev, ret, reason) \
14340 dd_dev_err((dd), \
14341 "Thermal sensor initialization failed: %s (%d)\n", \
14342 (reason), (ret))
14343
14344/*
14345 * Initialize the Avago Thermal sensor.
14346 *
14347 * After initialization, enable polling of thermal sensor through
14348 * SBus interface. In order for this to work, the SBus Master
14349 * firmware has to be loaded due to the fact that the HW polling
14350 * logic uses SBus interrupts, which are not supported with
14351 * default firmware. Otherwise, no data will be returned through
14352 * the ASIC_STS_THERM CSR.
14353 */
14354static int thermal_init(struct hfi1_devdata *dd)
14355{
14356 int ret = 0;
14357
14358 if (dd->icode != ICODE_RTL_SILICON ||
14359 !(dd->flags & HFI1_DO_INIT_ASIC))
14360 return ret;
14361
14362 acquire_hw_mutex(dd);
14363 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014364 /* Disable polling of thermal readings */
14365 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14366 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014367 /* Thermal Sensor Initialization */
14368 /* Step 1: Reset the Thermal SBus Receiver */
14369 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14370 RESET_SBUS_RECEIVER, 0);
14371 if (ret) {
14372 THERM_FAILURE(dd, ret, "Bus Reset");
14373 goto done;
14374 }
14375 /* Step 2: Set Reset bit in Thermal block */
14376 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14377 WRITE_SBUS_RECEIVER, 0x1);
14378 if (ret) {
14379 THERM_FAILURE(dd, ret, "Therm Block Reset");
14380 goto done;
14381 }
14382 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14383 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14384 WRITE_SBUS_RECEIVER, 0x32);
14385 if (ret) {
14386 THERM_FAILURE(dd, ret, "Write Clock Div");
14387 goto done;
14388 }
14389 /* Step 4: Select temperature mode */
14390 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14391 WRITE_SBUS_RECEIVER,
14392 SBUS_THERM_MONITOR_MODE);
14393 if (ret) {
14394 THERM_FAILURE(dd, ret, "Write Mode Sel");
14395 goto done;
14396 }
14397 /* Step 5: De-assert block reset and start conversion */
14398 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14399 WRITE_SBUS_RECEIVER, 0x2);
14400 if (ret) {
14401 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14402 goto done;
14403 }
14404 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14405 msleep(22);
14406
14407 /* Enable polling of thermal readings */
14408 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14409done:
14410 release_hw_mutex(dd);
14411 return ret;
14412}
14413
14414static void handle_temp_err(struct hfi1_devdata *dd)
14415{
14416 struct hfi1_pportdata *ppd = &dd->pport[0];
14417 /*
14418 * Thermal Critical Interrupt
14419 * Put the device into forced freeze mode, take link down to
14420 * offline, and put DC into reset.
14421 */
14422 dd_dev_emerg(dd,
14423 "Critical temperature reached! Forcing device into freeze mode!\n");
14424 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014425 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014426 /*
14427 * Shut DC down as much and as quickly as possible.
14428 *
14429 * Step 1: Take the link down to OFFLINE. This will cause the
14430 * 8051 to put the Serdes in reset. However, we don't want to
14431 * go through the entire link state machine since we want to
14432 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14433 * but rather an attempt to save the chip.
14434 * Code below is almost the same as quiet_serdes() but avoids
14435 * all the extra work and the sleeps.
14436 */
14437 ppd->driver_link_ready = 0;
14438 ppd->link_enabled = 0;
14439 set_physical_link_state(dd, PLS_OFFLINE |
14440 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14441 /*
14442 * Step 2: Shutdown LCB and 8051
14443 * After shutdown, do not restore DC_CFG_RESET value.
14444 */
14445 dc_shutdown(dd);
14446}