blob: 3577042423b2d4de9706716b8af30937358eb06a [file] [log] [blame]
Mike Marciniszyn77241052015-07-30 15:17:43 -04001/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
Dean Luick5d9157a2015-11-16 21:59:34 -050066#include "efivar.h"
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080067#include "platform.h"
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080068#include "aspm.h"
Mike Marciniszyn77241052015-07-30 15:17:43 -040069
70#define NUM_IB_PORTS 1
71
72uint kdeth_qp;
73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77module_param(num_vls, uint, S_IRUGO);
78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80/*
81 * Default time to aggregate two 10K packets from the idle state
82 * (timer not running). The timer starts at the end of the first packet,
83 * so only the time for one 10K packet and header plus a bit extra is needed.
84 * 10 * 1024 + 64 header byte = 10304 byte
85 * 10304 byte / 12.5 GB/s = 824.32ns
86 */
87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88module_param(rcv_intr_timeout, uint, S_IRUGO);
89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
90
91uint rcv_intr_count = 16; /* same as qib */
92module_param(rcv_intr_count, uint, S_IRUGO);
93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95ushort link_crc_mask = SUPPORTED_CRCS;
96module_param(link_crc_mask, ushort, S_IRUGO);
97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99uint loopback;
100module_param_named(loopback, loopback, uint, S_IRUGO);
101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable");
102
103/* Other driver tunables */
104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
105static ushort crc_14b_sideband = 1;
106static uint use_flr = 1;
107uint quick_linkup; /* skip LNI */
108
109struct flag_table {
110 u64 flag; /* the flag */
111 char *str; /* description string */
112 u16 extra; /* extra information */
113 u16 unused0;
114 u32 unused1;
115};
116
117/* str must be a string constant */
118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
120
121/* Send Error Consequences */
122#define SEC_WRITE_DROPPED 0x1
123#define SEC_PACKET_DROPPED 0x2
124#define SEC_SC_HALTED 0x4 /* per-context only */
125#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
126
Mike Marciniszyn77241052015-07-30 15:17:43 -0400127#define MIN_KERNEL_KCTXTS 2
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -0500128#define FIRST_KERNEL_KCTXT 1
Mike Marciniszyn77241052015-07-30 15:17:43 -0400129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242/*
243 * CCE Error flags.
244 */
245static struct flag_table cce_err_status_flags[] = {
246/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
248/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
250/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
252/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
254/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
256/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
258/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
260/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
262/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
266/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
268/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
270/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
272/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
274/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
276/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
278/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
280/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
282/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
283 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
284/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
286/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
288/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
290/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
292/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
294/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
296/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
298/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
300/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
302/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
304/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
306/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
308/*31*/ FLAG_ENTRY0("LATriggered",
309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
310/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
312/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
314/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
316/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
318/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
320/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
322/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
324/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
326/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
328/*41-63 reserved*/
329};
330
331/*
332 * Misc Error flags
333 */
334#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
335static struct flag_table misc_err_status_flags[] = {
336/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
337/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
338/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
339/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
340/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
341/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
342/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
343/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
344/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
345/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
346/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
347/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
348/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
349};
350
351/*
352 * TXE PIO Error flags and consequences
353 */
354static struct flag_table pio_err_status_flags[] = {
355/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
356 SEC_WRITE_DROPPED,
357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
358/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
359 SEC_SPC_FREEZE,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
361/* 2*/ FLAG_ENTRY("PioCsrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
364/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
367/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
370/* 5*/ FLAG_ENTRY("PioPccFifoParity",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
373/* 6*/ FLAG_ENTRY("PioPecFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
376/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
379/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
382/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
385/*10*/ FLAG_ENTRY("PioSmPktResetParity",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
388/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
391/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
394/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
395 0,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
397/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
400/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
403/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
406/*17*/ FLAG_ENTRY("PioInitSmIn",
407 0,
408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
409/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
412/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
415/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
416 0,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
418/*21*/ FLAG_ENTRY("PioWriteDataParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
421/*22*/ FLAG_ENTRY("PioStateMachine",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
424/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
425 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
427/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
428 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
430/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
431 SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
433/*26*/ FLAG_ENTRY("PioVlfSopParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
436/*27*/ FLAG_ENTRY("PioVlFifoParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
439/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
442/*29*/ FLAG_ENTRY("PioPpmcSopLen",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
445/*30-31 reserved*/
446/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
449/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
452/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
455/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
458/*36-63 reserved*/
459};
460
461/* TXE PIO errors that cause an SPC freeze */
462#define ALL_PIO_FREEZE_ERR \
463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
492
493/*
494 * TXE SDMA Error flags
495 */
496static struct flag_table sdma_err_status_flags[] = {
497/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
499/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
501/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
503/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
504 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
505/*04-63 reserved*/
506};
507
508/* TXE SDMA errors that cause an SPC freeze */
509#define ALL_SDMA_FREEZE_ERR \
510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
513
Mike Marciniszyn69a00b82016-02-03 14:31:49 -0800514/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
515#define PORT_DISCARD_EGRESS_ERRS \
516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
519
Mike Marciniszyn77241052015-07-30 15:17:43 -0400520/*
521 * TXE Egress Error flags
522 */
523#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
524static struct flag_table egress_err_status_flags[] = {
525/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
526/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
527/* 2 reserved */
528/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
530/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
531/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
532/* 6 reserved */
533/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
534 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
535/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
537/* 9-10 reserved */
538/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
540/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
541/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
542/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
543/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
544/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
545 SEES(TX_SDMA0_DISALLOWED_PACKET)),
546/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
547 SEES(TX_SDMA1_DISALLOWED_PACKET)),
548/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
549 SEES(TX_SDMA2_DISALLOWED_PACKET)),
550/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
551 SEES(TX_SDMA3_DISALLOWED_PACKET)),
552/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
553 SEES(TX_SDMA4_DISALLOWED_PACKET)),
554/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
555 SEES(TX_SDMA5_DISALLOWED_PACKET)),
556/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
557 SEES(TX_SDMA6_DISALLOWED_PACKET)),
558/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
559 SEES(TX_SDMA7_DISALLOWED_PACKET)),
560/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
561 SEES(TX_SDMA8_DISALLOWED_PACKET)),
562/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
563 SEES(TX_SDMA9_DISALLOWED_PACKET)),
564/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
565 SEES(TX_SDMA10_DISALLOWED_PACKET)),
566/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
567 SEES(TX_SDMA11_DISALLOWED_PACKET)),
568/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
569 SEES(TX_SDMA12_DISALLOWED_PACKET)),
570/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
571 SEES(TX_SDMA13_DISALLOWED_PACKET)),
572/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
573 SEES(TX_SDMA14_DISALLOWED_PACKET)),
574/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
575 SEES(TX_SDMA15_DISALLOWED_PACKET)),
576/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
578/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
580/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
582/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
584/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
586/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
588/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
590/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
592/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
594/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
595/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
596/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
597/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
598/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
599/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
600/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
601/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
602/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
603/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
604/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
605/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
606/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
607/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
608/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
609/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
610/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
611/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
612/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
613/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
614/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
615/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
617/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
619};
620
621/*
622 * TXE Egress Error Info flags
623 */
624#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
625static struct flag_table egress_err_info_flags[] = {
626/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
627/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
628/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
630/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
631/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
632/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
633/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
634/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
635/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
636/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
637/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
638/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
639/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
640/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
641/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
642/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
643/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
644/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
645/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
646/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
647/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
648};
649
650/* TXE Egress errors that cause an SPC freeze */
651#define ALL_TXE_EGRESS_FREEZE_ERR \
652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
656 | SEES(TX_LAUNCH_CSR_PARITY) \
657 | SEES(TX_SBRD_CTL_CSR_PARITY) \
658 | SEES(TX_CONFIG_PARITY) \
659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
668 | SEES(TX_CREDIT_RETURN_PARITY))
669
670/*
671 * TXE Send error flags
672 */
673#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
674static struct flag_table send_err_status_flags[] = {
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -0500675/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
Mike Marciniszyn77241052015-07-30 15:17:43 -0400676/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
677/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
678};
679
680/*
681 * TXE Send Context Error flags and consequences
682 */
683static struct flag_table sc_err_status_flags[] = {
684/* 0*/ FLAG_ENTRY("InconsistentSop",
685 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
687/* 1*/ FLAG_ENTRY("DisallowedPacket",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
690/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
691 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
693/* 3*/ FLAG_ENTRY("WriteOverflow",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
696/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
699/* 5-63 reserved*/
700};
701
702/*
703 * RXE Receive Error flags
704 */
705#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
706static struct flag_table rxe_err_status_flags[] = {
707/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
708/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
709/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
710/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
711/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
712/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
713/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
714/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
715/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
716/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
717/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
718/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
719/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
720/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
721/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
722/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
723/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
724 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
725/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
726/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
727/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
728 RXES(RBUF_BLOCK_LIST_READ_UNC)),
729/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
730 RXES(RBUF_BLOCK_LIST_READ_COR)),
731/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
732 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
733/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
734 RXES(RBUF_CSR_QENT_CNT_PARITY)),
735/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
736 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
737/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
738 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
739/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
740/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
741/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
742 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
743/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
744/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
745/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
746/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
747/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
748/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
749/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
750/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
751 RXES(RBUF_FL_INITDONE_PARITY)),
752/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
753 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
754/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
755/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
756/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
757/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
758 RXES(LOOKUP_DES_PART1_UNC_COR)),
759/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
760 RXES(LOOKUP_DES_PART2_PARITY)),
761/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
762/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
763/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
764/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
765/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
766/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
767/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
768/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
769/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
770/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
771/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
772/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
773/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
774/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
775/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
776/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
777/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
778/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
779/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
780/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
781/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
782/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
783};
784
785/* RXE errors that will trigger an SPC freeze */
786#define ALL_RXE_FREEZE_ERR \
787 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
831
832#define RXE_FREEZE_ABORT_MASK \
833 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
835 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
836
837/*
838 * DCC Error Flags
839 */
840#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
841static struct flag_table dcc_err_flags[] = {
842 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
843 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
844 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
845 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
847 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
848 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
849 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
850 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
851 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
852 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
853 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
854 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
855 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
856 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
857 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
858 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
859 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
860 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
861 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
862 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
863 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
864 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
865 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
866 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
867 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
868 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
869 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
870 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
871 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
872 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
873 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
875 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
876 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
877 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
878 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
879 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
880 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
881 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
882 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
883 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
884 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
886 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
887 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
888};
889
890/*
891 * LCB error flags
892 */
893#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
894static struct flag_table lcb_err_flags[] = {
895/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
896/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
897/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
898/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
899 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
900/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
901/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
902/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
903/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
904/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
905/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
906/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
907/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
908/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
909/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
910 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
911/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
912/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
913/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
914/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
915/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
916/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
917 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
918/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
919/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
920/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
921/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
922/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
923/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
924/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
925 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
926/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
927/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
928 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
929/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
930 LCBE(REDUNDANT_FLIT_PARITY_ERR))
931};
932
933/*
934 * DC8051 Error Flags
935 */
936#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
937static struct flag_table dc8051_err_flags[] = {
938 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
939 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
940 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
941 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
942 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
943 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
944 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
945 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
946 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
947 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
948 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
949};
950
951/*
952 * DC8051 Information Error flags
953 *
954 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
955 */
956static struct flag_table dc8051_info_err_flags[] = {
957 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
958 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
959 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
960 FLAG_ENTRY0("Serdes internal loopback failure",
961 FAILED_SERDES_INTERNAL_LOOPBACK),
962 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
963 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
964 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
965 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
966 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
968 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
969 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989
990static u32 encoded_size(u32 size);
991static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
992static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
993static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
994 u8 *continuous);
995static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
996 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
997static void read_vc_remote_link_width(struct hfi1_devdata *dd,
998 u8 *remote_tx_rate, u16 *link_widths);
999static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1000 u8 *flag_bits, u16 *link_widths);
1001static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1002 u8 *device_rev);
1003static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1004static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1005static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1006 u8 *tx_polarity_inversion,
1007 u8 *rx_polarity_inversion, u8 *max_rate);
1008static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1009 unsigned int context, u64 err_status);
1010static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1011static void handle_dcc_err(struct hfi1_devdata *dd,
1012 unsigned int context, u64 err_status);
1013static void handle_lcb_err(struct hfi1_devdata *dd,
1014 unsigned int context, u64 err_status);
1015static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023static void set_partition_keys(struct hfi1_pportdata *);
1024static const char *link_state_name(u32 state);
1025static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1026 u32 state);
1027static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1028 u64 *out_data);
1029static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1030static int thermal_init(struct hfi1_devdata *dd);
1031
1032static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1033 int msecs);
1034static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1035static void handle_temp_err(struct hfi1_devdata *);
1036static void dc_shutdown(struct hfi1_devdata *);
1037static void dc_start(struct hfi1_devdata *);
1038
1039/*
1040 * Error interrupt table entry. This is used as input to the interrupt
1041 * "clear down" routine used for all second tier error interrupt register.
1042 * Second tier interrupt registers have a single bit representing them
1043 * in the top-level CceIntStatus.
1044 */
1045struct err_reg_info {
1046 u32 status; /* status CSR offset */
1047 u32 clear; /* clear CSR offset */
1048 u32 mask; /* mask CSR offset */
1049 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1050 const char *desc;
1051};
1052
1053#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1054#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1055#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1056
1057/*
1058 * Helpers for building HFI and DC error interrupt table entries. Different
1059 * helpers are needed because of inconsistent register names.
1060 */
1061#define EE(reg, handler, desc) \
1062 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1063 handler, desc }
1064#define DC_EE1(reg, handler, desc) \
1065 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1066#define DC_EE2(reg, handler, desc) \
1067 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1068
1069/*
1070 * Table of the "misc" grouping of error interrupts. Each entry refers to
1071 * another register containing more information.
1072 */
1073static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1074/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1075/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1076/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1077/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1078/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1079/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1080/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1081/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1082 /* the rest are reserved */
1083};
1084
1085/*
1086 * Index into the Various section of the interrupt sources
1087 * corresponding to the Critical Temperature interrupt.
1088 */
1089#define TCRIT_INT_SOURCE 4
1090
1091/*
1092 * SDMA error interrupt entry - refers to another register containing more
1093 * information.
1094 */
1095static const struct err_reg_info sdma_eng_err =
1096 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1097
1098static const struct err_reg_info various_err[NUM_VARIOUS] = {
1099/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1100/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1101/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1102/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1103/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1104 /* rest are reserved */
1105};
1106
1107/*
1108 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1109 * register can not be derived from the MTU value because 10K is not
1110 * a power of 2. Therefore, we need a constant. Everything else can
1111 * be calculated.
1112 */
1113#define DCC_CFG_PORT_MTU_CAP_10240 7
1114
1115/*
1116 * Table of the DC grouping of error interrupts. Each entry refers to
1117 * another register containing more information.
1118 */
1119static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1120/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1121/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1122/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1123/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1124 /* the rest are reserved */
1125};
1126
1127struct cntr_entry {
1128 /*
1129 * counter name
1130 */
1131 char *name;
1132
1133 /*
1134 * csr to read for name (if applicable)
1135 */
1136 u64 csr;
1137
1138 /*
1139 * offset into dd or ppd to store the counter's value
1140 */
1141 int offset;
1142
1143 /*
1144 * flags
1145 */
1146 u8 flags;
1147
1148 /*
1149 * accessor for stat element, context either dd or ppd
1150 */
1151 u64 (*rw_cntr)(const struct cntr_entry *,
1152 void *context,
1153 int vl,
1154 int mode,
1155 u64 data);
1156};
1157
1158#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1159#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1160
1161#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1162{ \
1163 name, \
1164 csr, \
1165 offset, \
1166 flags, \
1167 accessor \
1168}
1169
1170/* 32bit RXE */
1171#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1172CNTR_ELEM(#name, \
1173 (counter * 8 + RCV_COUNTER_ARRAY32), \
1174 0, flags | CNTR_32BIT, \
1175 port_access_u32_csr)
1176
1177#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1178CNTR_ELEM(#name, \
1179 (counter * 8 + RCV_COUNTER_ARRAY32), \
1180 0, flags | CNTR_32BIT, \
1181 dev_access_u32_csr)
1182
1183/* 64bit RXE */
1184#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1185CNTR_ELEM(#name, \
1186 (counter * 8 + RCV_COUNTER_ARRAY64), \
1187 0, flags, \
1188 port_access_u64_csr)
1189
1190#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1191CNTR_ELEM(#name, \
1192 (counter * 8 + RCV_COUNTER_ARRAY64), \
1193 0, flags, \
1194 dev_access_u64_csr)
1195
1196#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1197#define OVR_ELM(ctx) \
1198CNTR_ELEM("RcvHdrOvr" #ctx, \
1199 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1200 0, CNTR_NORMAL, port_access_u64_csr)
1201
1202/* 32bit TXE */
1203#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204CNTR_ELEM(#name, \
1205 (counter * 8 + SEND_COUNTER_ARRAY32), \
1206 0, flags | CNTR_32BIT, \
1207 port_access_u32_csr)
1208
1209/* 64bit TXE */
1210#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212 (counter * 8 + SEND_COUNTER_ARRAY64), \
1213 0, flags, \
1214 port_access_u64_csr)
1215
1216# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name,\
1218 counter * 8 + SEND_COUNTER_ARRAY64, \
1219 0, \
1220 flags, \
1221 dev_access_u64_csr)
1222
1223/* CCE */
1224#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226 (counter * 8 + CCE_COUNTER_ARRAY32), \
1227 0, flags | CNTR_32BIT, \
1228 dev_access_u32_csr)
1229
1230#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233 0, flags | CNTR_32BIT, \
1234 dev_access_u32_csr)
1235
1236/* DC */
1237#define DC_PERF_CNTR(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 counter, \
1240 0, \
1241 flags, \
1242 dev_access_u64_csr)
1243
1244#define DC_PERF_CNTR_LCB(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 counter, \
1247 0, \
1248 flags, \
1249 dc_access_lcb_cntr)
1250
1251/* ibp counters */
1252#define SW_IBP_CNTR(name, cntr) \
1253CNTR_ELEM(#name, \
1254 0, \
1255 0, \
1256 CNTR_SYNTH, \
1257 access_ibp_##cntr)
1258
1259u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1260{
1261 u64 val;
1262
1263 if (dd->flags & HFI1_PRESENT) {
1264 val = readq((void __iomem *)dd->kregbase + offset);
1265 return val;
1266 }
1267 return -1;
1268}
1269
1270void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1271{
1272 if (dd->flags & HFI1_PRESENT)
1273 writeq(value, (void __iomem *)dd->kregbase + offset);
1274}
1275
1276void __iomem *get_csr_addr(
1277 struct hfi1_devdata *dd,
1278 u32 offset)
1279{
1280 return (void __iomem *)dd->kregbase + offset;
1281}
1282
1283static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1284 int mode, u64 value)
1285{
1286 u64 ret;
1287
1288
1289 if (mode == CNTR_MODE_R) {
1290 ret = read_csr(dd, csr);
1291 } else if (mode == CNTR_MODE_W) {
1292 write_csr(dd, csr, value);
1293 ret = value;
1294 } else {
1295 dd_dev_err(dd, "Invalid cntr register access mode");
1296 return 0;
1297 }
1298
1299 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1300 return ret;
1301}
1302
1303/* Dev Access */
1304static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1305 void *context, int vl, int mode, u64 data)
1306{
1307	struct hfi1_devdata *dd = context;
1308	u64 csr = entry->csr;
1309
1310	if (entry->flags & CNTR_SDMA) {
1311 if (vl == CNTR_INVALID_VL)
1312 return 0;
1313 csr += 0x100 * vl;
1314 } else {
1315 if (vl != CNTR_INVALID_VL)
1316 return 0;
1317 }
1318 return read_write_csr(dd, csr, mode, data);
1319}
1320
1321static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1322 void *context, int idx, int mode, u64 data)
1323{
1324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1325
1326 if (dd->per_sdma && idx < dd->num_sdma)
1327 return dd->per_sdma[idx].err_cnt;
1328 return 0;
1329}
1330
1331static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1332 void *context, int idx, int mode, u64 data)
1333{
1334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1335
1336 if (dd->per_sdma && idx < dd->num_sdma)
1337 return dd->per_sdma[idx].sdma_int_cnt;
1338 return 0;
1339}
1340
1341static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1342 void *context, int idx, int mode, u64 data)
1343{
1344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1345
1346 if (dd->per_sdma && idx < dd->num_sdma)
1347 return dd->per_sdma[idx].idle_int_cnt;
1348 return 0;
1349}
1350
1351static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1352 void *context, int idx, int mode,
1353 u64 data)
1354{
1355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1356
1357 if (dd->per_sdma && idx < dd->num_sdma)
1358 return dd->per_sdma[idx].progress_int_cnt;
1359 return 0;
1360}
1361
1362static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1363 int vl, int mode, u64 data)
1364{
1365	struct hfi1_devdata *dd = context;
1366
1367 u64 val = 0;
1368 u64 csr = entry->csr;
1369
1370 if (entry->flags & CNTR_VL) {
1371 if (vl == CNTR_INVALID_VL)
1372 return 0;
1373 csr += 8 * vl;
1374 } else {
1375 if (vl != CNTR_INVALID_VL)
1376 return 0;
1377 }
1378
1379 val = read_write_csr(dd, csr, mode, data);
1380 return val;
1381}
1382
1383static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1384 int vl, int mode, u64 data)
1385{
1386	struct hfi1_devdata *dd = context;
1387	u32 csr = entry->csr;
1388 int ret = 0;
1389
1390 if (vl != CNTR_INVALID_VL)
1391 return 0;
1392 if (mode == CNTR_MODE_R)
1393 ret = read_lcb_csr(dd, csr, &data);
1394 else if (mode == CNTR_MODE_W)
1395 ret = write_lcb_csr(dd, csr, data);
1396
1397 if (ret) {
1398 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1399 return 0;
1400 }
1401
1402 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1403 return data;
1404}
1405
1406/* Port Access */
1407static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1408 int vl, int mode, u64 data)
1409{
1410	struct hfi1_pportdata *ppd = context;
1411
1412 if (vl != CNTR_INVALID_VL)
1413 return 0;
1414 return read_write_csr(ppd->dd, entry->csr, mode, data);
1415}
1416
1417static u64 port_access_u64_csr(const struct cntr_entry *entry,
1418 void *context, int vl, int mode, u64 data)
1419{
1420	struct hfi1_pportdata *ppd = context;
1421	u64 val;
1422 u64 csr = entry->csr;
1423
1424 if (entry->flags & CNTR_VL) {
1425 if (vl == CNTR_INVALID_VL)
1426 return 0;
1427 csr += 8 * vl;
1428 } else {
1429 if (vl != CNTR_INVALID_VL)
1430 return 0;
1431 }
1432 val = read_write_csr(ppd->dd, csr, mode, data);
1433 return val;
1434}
1435
1436/* Software defined */
1437static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1438 u64 data)
1439{
1440 u64 ret;
1441
1442 if (mode == CNTR_MODE_R) {
1443 ret = *cntr;
1444 } else if (mode == CNTR_MODE_W) {
1445 *cntr = data;
1446 ret = data;
1447 } else {
1448 dd_dev_err(dd, "Invalid cntr sw access mode");
1449 return 0;
1450 }
1451
1452 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1453
1454 return ret;
1455}
1456
1457static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1458 int vl, int mode, u64 data)
1459{
1460	struct hfi1_pportdata *ppd = context;
1461
1462 if (vl != CNTR_INVALID_VL)
1463 return 0;
1464 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1465}
1466
1467static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1468 int vl, int mode, u64 data)
1469{
1470	struct hfi1_pportdata *ppd = context;
1471
1472 if (vl != CNTR_INVALID_VL)
1473 return 0;
1474 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1475}
1476
1477static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1478 void *context, int vl, int mode,
1479 u64 data)
1480{
1481 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1482
1483 if (vl != CNTR_INVALID_VL)
1484 return 0;
1485 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1486}
1487
1488static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1489 void *context, int vl, int mode, u64 data)
1490{
1491	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1492 u64 zero = 0;
1493 u64 *counter;
1494
1495	if (vl == CNTR_INVALID_VL)
1496 counter = &ppd->port_xmit_discards;
1497 else if (vl >= 0 && vl < C_VL_COUNT)
1498 counter = &ppd->port_xmit_discards_vl[vl];
1499 else
1500 counter = &zero;
1501
1502	return read_write_sw(ppd->dd, counter, mode, data);
1503}
1504
1505static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1506 void *context, int vl, int mode, u64 data)
1507{
1508	struct hfi1_pportdata *ppd = context;
1509
1510 if (vl != CNTR_INVALID_VL)
1511 return 0;
1512
1513 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1514 mode, data);
1515}
1516
1517static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1518 void *context, int vl, int mode, u64 data)
1519{
1520	struct hfi1_pportdata *ppd = context;
1521
1522 if (vl != CNTR_INVALID_VL)
1523 return 0;
1524
1525 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1526 mode, data);
1527}
1528
1529u64 get_all_cpu_total(u64 __percpu *cntr)
1530{
1531 int cpu;
1532 u64 counter = 0;
1533
1534 for_each_possible_cpu(cpu)
1535 counter += *per_cpu_ptr(cntr, cpu);
1536 return counter;
1537}
1538
1539static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1540 u64 __percpu *cntr,
1541 int vl, int mode, u64 data)
1542{
1543
1544 u64 ret = 0;
1545
1546 if (vl != CNTR_INVALID_VL)
1547 return 0;
1548
1549 if (mode == CNTR_MODE_R) {
1550 ret = get_all_cpu_total(cntr) - *z_val;
1551 } else if (mode == CNTR_MODE_W) {
1552 /* A write can only zero the counter */
1553 if (data == 0)
1554 *z_val = get_all_cpu_total(cntr);
1555 else
1556 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1557 } else {
1558 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1559 return 0;
1560 }
1561
1562 return ret;
1563}
1564
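/*
 * Illustrative sketch only: per-CPU counters are never cleared in place.
 * A CNTR_MODE_W write of 0 through read_write_cpu() just records the current
 * all-CPU total in *z_val, so later reads report the delta since that point.
 * example_zero_percpu_counter() is a hypothetical name.
 */
static inline void example_zero_percpu_counter(struct hfi1_devdata *dd,
					       u64 *z_val, u64 __percpu *cntr)
{
	/* after this, a CNTR_MODE_R read yields 0 until the counter moves */
	read_write_cpu(dd, z_val, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
}
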
1565static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1566 void *context, int vl, int mode, u64 data)
1567{
1568	struct hfi1_devdata *dd = context;
1569
1570 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1571 mode, data);
1572}
1573
1574static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1575 void *context, int vl, int mode, u64 data)
1576{
1577	struct hfi1_devdata *dd = context;
1578
1579 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1580 mode, data);
1581}
1582
1583static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1584 void *context, int vl, int mode, u64 data)
1585{
1586	struct hfi1_devdata *dd = context;
1587
1588 return dd->verbs_dev.n_piowait;
1589}
1590
1591static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1592 void *context, int vl, int mode, u64 data)
1593{
1594	struct hfi1_devdata *dd = context;
1595
1596 return dd->verbs_dev.n_txwait;
1597}
1598
1599static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1600 void *context, int vl, int mode, u64 data)
1601{
1602	struct hfi1_devdata *dd = context;
1603
1604 return dd->verbs_dev.n_kmem_wait;
1605}
1606
1607static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1608 void *context, int vl, int mode, u64 data)
1609{
1610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1611
1612 return dd->verbs_dev.n_send_schedule;
1613}
1614
1615/* Software counters for the error status bits within MISC_ERR_STATUS */
1616static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1617 void *context, int vl, int mode,
1618 u64 data)
1619{
1620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1621
1622 return dd->misc_err_status_cnt[12];
1623}
1624
1625static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1626 void *context, int vl, int mode,
1627 u64 data)
1628{
1629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1630
1631 return dd->misc_err_status_cnt[11];
1632}
1633
1634static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1635 void *context, int vl, int mode,
1636 u64 data)
1637{
1638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1639
1640 return dd->misc_err_status_cnt[10];
1641}
1642
1643static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1644 void *context, int vl,
1645 int mode, u64 data)
1646{
1647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1648
1649 return dd->misc_err_status_cnt[9];
1650}
1651
1652static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1653 void *context, int vl, int mode,
1654 u64 data)
1655{
1656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1657
1658 return dd->misc_err_status_cnt[8];
1659}
1660
1661static u64 access_misc_efuse_read_bad_addr_err_cnt(
1662 const struct cntr_entry *entry,
1663 void *context, int vl, int mode, u64 data)
1664{
1665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1666
1667 return dd->misc_err_status_cnt[7];
1668}
1669
1670static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1671 void *context, int vl,
1672 int mode, u64 data)
1673{
1674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1675
1676 return dd->misc_err_status_cnt[6];
1677}
1678
1679static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1680 void *context, int vl, int mode,
1681 u64 data)
1682{
1683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685 return dd->misc_err_status_cnt[5];
1686}
1687
1688static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1689 void *context, int vl, int mode,
1690 u64 data)
1691{
1692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694 return dd->misc_err_status_cnt[4];
1695}
1696
1697static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1698 void *context, int vl,
1699 int mode, u64 data)
1700{
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
1703 return dd->misc_err_status_cnt[3];
1704}
1705
1706static u64 access_misc_csr_write_bad_addr_err_cnt(
1707 const struct cntr_entry *entry,
1708 void *context, int vl, int mode, u64 data)
1709{
1710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1711
1712 return dd->misc_err_status_cnt[2];
1713}
1714
1715static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1716 void *context, int vl,
1717 int mode, u64 data)
1718{
1719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1720
1721 return dd->misc_err_status_cnt[1];
1722}
1723
1724static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1725 void *context, int vl, int mode,
1726 u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->misc_err_status_cnt[0];
1731}
1732
1733/*
1734 * Software counter for the aggregate of
1735 * individual CceErrStatus counters
1736 */
1737static u64 access_sw_cce_err_status_aggregated_cnt(
1738 const struct cntr_entry *entry,
1739 void *context, int vl, int mode, u64 data)
1740{
1741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1742
1743 return dd->sw_cce_err_status_aggregate;
1744}
1745
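/*
 * Illustrative sketch only, not the driver's actual error handler (which
 * lives elsewhere in this file): the per-bit counters returned by the
 * accessors below are bumped once per set CceErrStatus bit, and the
 * aggregate above once per reported error.  The bound of 41 bits simply
 * mirrors the highest index (40) used by these accessors; the real handler
 * may differ.
 */
static inline void example_count_cce_errors(struct hfi1_devdata *dd, u64 reg)
{
	int i;

	for (i = 0; i < 41; i++)
		if (reg & (1ull << i))
			dd->cce_err_status_cnt[i]++;
	dd->sw_cce_err_status_aggregate++;
}
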
1746/*
1747 * Software counters corresponding to each of the
1748 * error status bits within CceErrStatus
1749 */
1750static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1751 void *context, int vl, int mode,
1752 u64 data)
1753{
1754 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756 return dd->cce_err_status_cnt[40];
1757}
1758
1759static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1760 void *context, int vl, int mode,
1761 u64 data)
1762{
1763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765 return dd->cce_err_status_cnt[39];
1766}
1767
1768static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1769 void *context, int vl, int mode,
1770 u64 data)
1771{
1772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774 return dd->cce_err_status_cnt[38];
1775}
1776
1777static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1778 void *context, int vl, int mode,
1779 u64 data)
1780{
1781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783 return dd->cce_err_status_cnt[37];
1784}
1785
1786static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1787 void *context, int vl, int mode,
1788 u64 data)
1789{
1790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792 return dd->cce_err_status_cnt[36];
1793}
1794
1795static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1796 const struct cntr_entry *entry,
1797 void *context, int vl, int mode, u64 data)
1798{
1799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801 return dd->cce_err_status_cnt[35];
1802}
1803
1804static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1805 const struct cntr_entry *entry,
1806 void *context, int vl, int mode, u64 data)
1807{
1808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810 return dd->cce_err_status_cnt[34];
1811}
1812
1813static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1814 void *context, int vl,
1815 int mode, u64 data)
1816{
1817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819 return dd->cce_err_status_cnt[33];
1820}
1821
1822static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1823 void *context, int vl, int mode,
1824 u64 data)
1825{
1826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828 return dd->cce_err_status_cnt[32];
1829}
1830
1831static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1832 void *context, int vl, int mode, u64 data)
1833{
1834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1835
1836 return dd->cce_err_status_cnt[31];
1837}
1838
1839static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1840 void *context, int vl, int mode,
1841 u64 data)
1842{
1843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1844
1845 return dd->cce_err_status_cnt[30];
1846}
1847
1848static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1849 void *context, int vl, int mode,
1850 u64 data)
1851{
1852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854 return dd->cce_err_status_cnt[29];
1855}
1856
1857static u64 access_pcic_transmit_back_parity_err_cnt(
1858 const struct cntr_entry *entry,
1859 void *context, int vl, int mode, u64 data)
1860{
1861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863 return dd->cce_err_status_cnt[28];
1864}
1865
1866static u64 access_pcic_transmit_front_parity_err_cnt(
1867 const struct cntr_entry *entry,
1868 void *context, int vl, int mode, u64 data)
1869{
1870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872 return dd->cce_err_status_cnt[27];
1873}
1874
1875static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1876 void *context, int vl, int mode,
1877 u64 data)
1878{
1879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881 return dd->cce_err_status_cnt[26];
1882}
1883
1884static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1885 void *context, int vl, int mode,
1886 u64 data)
1887{
1888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890 return dd->cce_err_status_cnt[25];
1891}
1892
1893static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1894 void *context, int vl, int mode,
1895 u64 data)
1896{
1897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899 return dd->cce_err_status_cnt[24];
1900}
1901
1902static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1903 void *context, int vl, int mode,
1904 u64 data)
1905{
1906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908 return dd->cce_err_status_cnt[23];
1909}
1910
1911static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1912 void *context, int vl,
1913 int mode, u64 data)
1914{
1915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917 return dd->cce_err_status_cnt[22];
1918}
1919
1920static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1921 void *context, int vl, int mode,
1922 u64 data)
1923{
1924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926 return dd->cce_err_status_cnt[21];
1927}
1928
1929static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1930 const struct cntr_entry *entry,
1931 void *context, int vl, int mode, u64 data)
1932{
1933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934
1935 return dd->cce_err_status_cnt[20];
1936}
1937
1938static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1939 void *context, int vl,
1940 int mode, u64 data)
1941{
1942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943
1944 return dd->cce_err_status_cnt[19];
1945}
1946
1947static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1948 void *context, int vl, int mode,
1949 u64 data)
1950{
1951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952
1953 return dd->cce_err_status_cnt[18];
1954}
1955
1956static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1957 void *context, int vl, int mode,
1958 u64 data)
1959{
1960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961
1962 return dd->cce_err_status_cnt[17];
1963}
1964
1965static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1966 void *context, int vl, int mode,
1967 u64 data)
1968{
1969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970
1971 return dd->cce_err_status_cnt[16];
1972}
1973
1974static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1975 void *context, int vl, int mode,
1976 u64 data)
1977{
1978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979
1980 return dd->cce_err_status_cnt[15];
1981}
1982
1983static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1984 void *context, int vl,
1985 int mode, u64 data)
1986{
1987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988
1989 return dd->cce_err_status_cnt[14];
1990}
1991
1992static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1993 void *context, int vl, int mode,
1994 u64 data)
1995{
1996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997
1998 return dd->cce_err_status_cnt[13];
1999}
2000
2001static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2002 const struct cntr_entry *entry,
2003 void *context, int vl, int mode, u64 data)
2004{
2005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006
2007 return dd->cce_err_status_cnt[12];
2008}
2009
2010static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2011 const struct cntr_entry *entry,
2012 void *context, int vl, int mode, u64 data)
2013{
2014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015
2016 return dd->cce_err_status_cnt[11];
2017}
2018
2019static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2020 const struct cntr_entry *entry,
2021 void *context, int vl, int mode, u64 data)
2022{
2023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024
2025 return dd->cce_err_status_cnt[10];
2026}
2027
2028static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2029 const struct cntr_entry *entry,
2030 void *context, int vl, int mode, u64 data)
2031{
2032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033
2034 return dd->cce_err_status_cnt[9];
2035}
2036
2037static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2038 const struct cntr_entry *entry,
2039 void *context, int vl, int mode, u64 data)
2040{
2041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042
2043 return dd->cce_err_status_cnt[8];
2044}
2045
2046static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2047 void *context, int vl,
2048 int mode, u64 data)
2049{
2050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051
2052 return dd->cce_err_status_cnt[7];
2053}
2054
2055static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2056 const struct cntr_entry *entry,
2057 void *context, int vl, int mode, u64 data)
2058{
2059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060
2061 return dd->cce_err_status_cnt[6];
2062}
2063
2064static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2065 void *context, int vl, int mode,
2066 u64 data)
2067{
2068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069
2070 return dd->cce_err_status_cnt[5];
2071}
2072
2073static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2074 void *context, int vl, int mode,
2075 u64 data)
2076{
2077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078
2079 return dd->cce_err_status_cnt[4];
2080}
2081
2082static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2083 const struct cntr_entry *entry,
2084 void *context, int vl, int mode, u64 data)
2085{
2086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087
2088 return dd->cce_err_status_cnt[3];
2089}
2090
2091static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2092 void *context, int vl,
2093 int mode, u64 data)
2094{
2095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096
2097 return dd->cce_err_status_cnt[2];
2098}
2099
2100static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2101 void *context, int vl,
2102 int mode, u64 data)
2103{
2104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105
2106 return dd->cce_err_status_cnt[1];
2107}
2108
2109static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl, int mode,
2111 u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->cce_err_status_cnt[0];
2116}
2117
2118/*
2119 * Software counters corresponding to each of the
2120 * error status bits within RcvErrStatus
2121 */
2122static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2123 void *context, int vl, int mode,
2124 u64 data)
2125{
2126 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2127
2128 return dd->rcv_err_status_cnt[63];
2129}
2130
2131static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2132 void *context, int vl,
2133 int mode, u64 data)
2134{
2135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2136
2137 return dd->rcv_err_status_cnt[62];
2138}
2139
2140static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2141 void *context, int vl, int mode,
2142 u64 data)
2143{
2144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2145
2146 return dd->rcv_err_status_cnt[61];
2147}
2148
2149static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2150 void *context, int vl, int mode,
2151 u64 data)
2152{
2153 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2154
2155 return dd->rcv_err_status_cnt[60];
2156}
2157
2158static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2159 void *context, int vl,
2160 int mode, u64 data)
2161{
2162 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2163
2164 return dd->rcv_err_status_cnt[59];
2165}
2166
2167static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2168 void *context, int vl,
2169 int mode, u64 data)
2170{
2171 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2172
2173 return dd->rcv_err_status_cnt[58];
2174}
2175
2176static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2177 void *context, int vl, int mode,
2178 u64 data)
2179{
2180 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2181
2182 return dd->rcv_err_status_cnt[57];
2183}
2184
2185static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2186 void *context, int vl, int mode,
2187 u64 data)
2188{
2189 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2190
2191 return dd->rcv_err_status_cnt[56];
2192}
2193
2194static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2195 void *context, int vl, int mode,
2196 u64 data)
2197{
2198 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2199
2200 return dd->rcv_err_status_cnt[55];
2201}
2202
2203static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2204 const struct cntr_entry *entry,
2205 void *context, int vl, int mode, u64 data)
2206{
2207 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2208
2209 return dd->rcv_err_status_cnt[54];
2210}
2211
2212static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2213 const struct cntr_entry *entry,
2214 void *context, int vl, int mode, u64 data)
2215{
2216 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2217
2218 return dd->rcv_err_status_cnt[53];
2219}
2220
2221static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2222 void *context, int vl,
2223 int mode, u64 data)
2224{
2225 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2226
2227 return dd->rcv_err_status_cnt[52];
2228}
2229
2230static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2231 void *context, int vl,
2232 int mode, u64 data)
2233{
2234 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235
2236 return dd->rcv_err_status_cnt[51];
2237}
2238
2239static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2240 void *context, int vl,
2241 int mode, u64 data)
2242{
2243 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244
2245 return dd->rcv_err_status_cnt[50];
2246}
2247
2248static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2249 void *context, int vl,
2250 int mode, u64 data)
2251{
2252 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253
2254 return dd->rcv_err_status_cnt[49];
2255}
2256
2257static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2258 void *context, int vl,
2259 int mode, u64 data)
2260{
2261 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262
2263 return dd->rcv_err_status_cnt[48];
2264}
2265
2266static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2267 void *context, int vl,
2268 int mode, u64 data)
2269{
2270 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271
2272 return dd->rcv_err_status_cnt[47];
2273}
2274
2275static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2276 void *context, int vl, int mode,
2277 u64 data)
2278{
2279 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280
2281 return dd->rcv_err_status_cnt[46];
2282}
2283
2284static u64 access_rx_hq_intr_csr_parity_err_cnt(
2285 const struct cntr_entry *entry,
2286 void *context, int vl, int mode, u64 data)
2287{
2288 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289
2290 return dd->rcv_err_status_cnt[45];
2291}
2292
2293static u64 access_rx_lookup_csr_parity_err_cnt(
2294 const struct cntr_entry *entry,
2295 void *context, int vl, int mode, u64 data)
2296{
2297 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298
2299 return dd->rcv_err_status_cnt[44];
2300}
2301
2302static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2303 const struct cntr_entry *entry,
2304 void *context, int vl, int mode, u64 data)
2305{
2306 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307
2308 return dd->rcv_err_status_cnt[43];
2309}
2310
2311static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2312 const struct cntr_entry *entry,
2313 void *context, int vl, int mode, u64 data)
2314{
2315 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316
2317 return dd->rcv_err_status_cnt[42];
2318}
2319
2320static u64 access_rx_lookup_des_part2_parity_err_cnt(
2321 const struct cntr_entry *entry,
2322 void *context, int vl, int mode, u64 data)
2323{
2324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325
2326 return dd->rcv_err_status_cnt[41];
2327}
2328
2329static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2330 const struct cntr_entry *entry,
2331 void *context, int vl, int mode, u64 data)
2332{
2333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334
2335 return dd->rcv_err_status_cnt[40];
2336}
2337
2338static u64 access_rx_lookup_des_part1_unc_err_cnt(
2339 const struct cntr_entry *entry,
2340 void *context, int vl, int mode, u64 data)
2341{
2342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343
2344 return dd->rcv_err_status_cnt[39];
2345}
2346
2347static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2348 const struct cntr_entry *entry,
2349 void *context, int vl, int mode, u64 data)
2350{
2351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352
2353 return dd->rcv_err_status_cnt[38];
2354}
2355
2356static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2357 const struct cntr_entry *entry,
2358 void *context, int vl, int mode, u64 data)
2359{
2360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361
2362 return dd->rcv_err_status_cnt[37];
2363}
2364
2365static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2366 const struct cntr_entry *entry,
2367 void *context, int vl, int mode, u64 data)
2368{
2369 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370
2371 return dd->rcv_err_status_cnt[36];
2372}
2373
2374static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2375 const struct cntr_entry *entry,
2376 void *context, int vl, int mode, u64 data)
2377{
2378 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379
2380 return dd->rcv_err_status_cnt[35];
2381}
2382
2383static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2384 const struct cntr_entry *entry,
2385 void *context, int vl, int mode, u64 data)
2386{
2387 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388
2389 return dd->rcv_err_status_cnt[34];
2390}
2391
2392static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2393 const struct cntr_entry *entry,
2394 void *context, int vl, int mode, u64 data)
2395{
2396 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397
2398 return dd->rcv_err_status_cnt[33];
2399}
2400
2401static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2402 void *context, int vl, int mode,
2403 u64 data)
2404{
2405 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406
2407 return dd->rcv_err_status_cnt[32];
2408}
2409
2410static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2411 void *context, int vl, int mode,
2412 u64 data)
2413{
2414 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415
2416 return dd->rcv_err_status_cnt[31];
2417}
2418
2419static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2420 void *context, int vl, int mode,
2421 u64 data)
2422{
2423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424
2425 return dd->rcv_err_status_cnt[30];
2426}
2427
2428static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2429 void *context, int vl, int mode,
2430 u64 data)
2431{
2432 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433
2434 return dd->rcv_err_status_cnt[29];
2435}
2436
2437static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2438 void *context, int vl,
2439 int mode, u64 data)
2440{
2441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442
2443 return dd->rcv_err_status_cnt[28];
2444}
2445
2446static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2447 const struct cntr_entry *entry,
2448 void *context, int vl, int mode, u64 data)
2449{
2450 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451
2452 return dd->rcv_err_status_cnt[27];
2453}
2454
2455static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2456 const struct cntr_entry *entry,
2457 void *context, int vl, int mode, u64 data)
2458{
2459 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460
2461 return dd->rcv_err_status_cnt[26];
2462}
2463
2464static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2465 const struct cntr_entry *entry,
2466 void *context, int vl, int mode, u64 data)
2467{
2468 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469
2470 return dd->rcv_err_status_cnt[25];
2471}
2472
2473static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2474 const struct cntr_entry *entry,
2475 void *context, int vl, int mode, u64 data)
2476{
2477 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478
2479 return dd->rcv_err_status_cnt[24];
2480}
2481
2482static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2483 const struct cntr_entry *entry,
2484 void *context, int vl, int mode, u64 data)
2485{
2486 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487
2488 return dd->rcv_err_status_cnt[23];
2489}
2490
2491static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2492 const struct cntr_entry *entry,
2493 void *context, int vl, int mode, u64 data)
2494{
2495 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496
2497 return dd->rcv_err_status_cnt[22];
2498}
2499
2500static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2501 const struct cntr_entry *entry,
2502 void *context, int vl, int mode, u64 data)
2503{
2504 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505
2506 return dd->rcv_err_status_cnt[21];
2507}
2508
2509static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2510 const struct cntr_entry *entry,
2511 void *context, int vl, int mode, u64 data)
2512{
2513 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514
2515 return dd->rcv_err_status_cnt[20];
2516}
2517
2518static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2519 const struct cntr_entry *entry,
2520 void *context, int vl, int mode, u64 data)
2521{
2522 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523
2524 return dd->rcv_err_status_cnt[19];
2525}
2526
2527static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2528 void *context, int vl,
2529 int mode, u64 data)
2530{
2531 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532
2533 return dd->rcv_err_status_cnt[18];
2534}
2535
2536static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2537 void *context, int vl,
2538 int mode, u64 data)
2539{
2540 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541
2542 return dd->rcv_err_status_cnt[17];
2543}
2544
2545static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2546 const struct cntr_entry *entry,
2547 void *context, int vl, int mode, u64 data)
2548{
2549 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550
2551 return dd->rcv_err_status_cnt[16];
2552}
2553
2554static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2555 const struct cntr_entry *entry,
2556 void *context, int vl, int mode, u64 data)
2557{
2558 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559
2560 return dd->rcv_err_status_cnt[15];
2561}
2562
2563static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2564 void *context, int vl,
2565 int mode, u64 data)
2566{
2567 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568
2569 return dd->rcv_err_status_cnt[14];
2570}
2571
2572static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2573 void *context, int vl,
2574 int mode, u64 data)
2575{
2576 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577
2578 return dd->rcv_err_status_cnt[13];
2579}
2580
2581static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2582 void *context, int vl, int mode,
2583 u64 data)
2584{
2585 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586
2587 return dd->rcv_err_status_cnt[12];
2588}
2589
2590static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2591 void *context, int vl, int mode,
2592 u64 data)
2593{
2594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595
2596 return dd->rcv_err_status_cnt[11];
2597}
2598
2599static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2600 void *context, int vl, int mode,
2601 u64 data)
2602{
2603 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604
2605 return dd->rcv_err_status_cnt[10];
2606}
2607
2608static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2609 void *context, int vl, int mode,
2610 u64 data)
2611{
2612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613
2614 return dd->rcv_err_status_cnt[9];
2615}
2616
2617static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2618 void *context, int vl, int mode,
2619 u64 data)
2620{
2621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622
2623 return dd->rcv_err_status_cnt[8];
2624}
2625
2626static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2627 const struct cntr_entry *entry,
2628 void *context, int vl, int mode, u64 data)
2629{
2630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631
2632 return dd->rcv_err_status_cnt[7];
2633}
2634
2635static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2636 const struct cntr_entry *entry,
2637 void *context, int vl, int mode, u64 data)
2638{
2639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640
2641 return dd->rcv_err_status_cnt[6];
2642}
2643
2644static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2645 void *context, int vl, int mode,
2646 u64 data)
2647{
2648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649
2650 return dd->rcv_err_status_cnt[5];
2651}
2652
2653static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2654 void *context, int vl, int mode,
2655 u64 data)
2656{
2657 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658
2659 return dd->rcv_err_status_cnt[4];
2660}
2661
2662static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2663 void *context, int vl, int mode,
2664 u64 data)
2665{
2666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667
2668 return dd->rcv_err_status_cnt[3];
2669}
2670
2671static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2672 void *context, int vl, int mode,
2673 u64 data)
2674{
2675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676
2677 return dd->rcv_err_status_cnt[2];
2678}
2679
2680static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2681 void *context, int vl, int mode,
2682 u64 data)
2683{
2684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685
2686 return dd->rcv_err_status_cnt[1];
2687}
2688
2689static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2690 void *context, int vl, int mode,
2691 u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->rcv_err_status_cnt[0];
2696}
2697
2698/*
2699 * Software counters corresponding to each of the
2700 * error status bits within SendPioErrStatus
2701 */
2702static u64 access_pio_pec_sop_head_parity_err_cnt(
2703 const struct cntr_entry *entry,
2704 void *context, int vl, int mode, u64 data)
2705{
2706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2707
2708 return dd->send_pio_err_status_cnt[35];
2709}
2710
2711static u64 access_pio_pcc_sop_head_parity_err_cnt(
2712 const struct cntr_entry *entry,
2713 void *context, int vl, int mode, u64 data)
2714{
2715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2716
2717 return dd->send_pio_err_status_cnt[34];
2718}
2719
2720static u64 access_pio_last_returned_cnt_parity_err_cnt(
2721 const struct cntr_entry *entry,
2722 void *context, int vl, int mode, u64 data)
2723{
2724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2725
2726 return dd->send_pio_err_status_cnt[33];
2727}
2728
2729static u64 access_pio_current_free_cnt_parity_err_cnt(
2730 const struct cntr_entry *entry,
2731 void *context, int vl, int mode, u64 data)
2732{
2733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2734
2735 return dd->send_pio_err_status_cnt[32];
2736}
2737
2738static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2739 void *context, int vl, int mode,
2740 u64 data)
2741{
2742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2743
2744 return dd->send_pio_err_status_cnt[31];
2745}
2746
2747static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2748 void *context, int vl, int mode,
2749 u64 data)
2750{
2751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2752
2753 return dd->send_pio_err_status_cnt[30];
2754}
2755
2756static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2757 void *context, int vl, int mode,
2758 u64 data)
2759{
2760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2761
2762 return dd->send_pio_err_status_cnt[29];
2763}
2764
2765static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2766 const struct cntr_entry *entry,
2767 void *context, int vl, int mode, u64 data)
2768{
2769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2770
2771 return dd->send_pio_err_status_cnt[28];
2772}
2773
2774static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2775 void *context, int vl, int mode,
2776 u64 data)
2777{
2778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2779
2780 return dd->send_pio_err_status_cnt[27];
2781}
2782
2783static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2784 void *context, int vl, int mode,
2785 u64 data)
2786{
2787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2788
2789 return dd->send_pio_err_status_cnt[26];
2790}
2791
2792static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2793 void *context, int vl,
2794 int mode, u64 data)
2795{
2796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2797
2798 return dd->send_pio_err_status_cnt[25];
2799}
2800
2801static u64 access_pio_block_qw_count_parity_err_cnt(
2802 const struct cntr_entry *entry,
2803 void *context, int vl, int mode, u64 data)
2804{
2805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2806
2807 return dd->send_pio_err_status_cnt[24];
2808}
2809
2810static u64 access_pio_write_qw_valid_parity_err_cnt(
2811 const struct cntr_entry *entry,
2812 void *context, int vl, int mode, u64 data)
2813{
2814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815
2816 return dd->send_pio_err_status_cnt[23];
2817}
2818
2819static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2820 void *context, int vl, int mode,
2821 u64 data)
2822{
2823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824
2825 return dd->send_pio_err_status_cnt[22];
2826}
2827
2828static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2829 void *context, int vl,
2830 int mode, u64 data)
2831{
2832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833
2834 return dd->send_pio_err_status_cnt[21];
2835}
2836
2837static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2838 void *context, int vl,
2839 int mode, u64 data)
2840{
2841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842
2843 return dd->send_pio_err_status_cnt[20];
2844}
2845
2846static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2847 void *context, int vl,
2848 int mode, u64 data)
2849{
2850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851
2852 return dd->send_pio_err_status_cnt[19];
2853}
2854
2855static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2856 const struct cntr_entry *entry,
2857 void *context, int vl, int mode, u64 data)
2858{
2859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860
2861 return dd->send_pio_err_status_cnt[18];
2862}
2863
2864static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2865 void *context, int vl, int mode,
2866 u64 data)
2867{
2868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869
2870 return dd->send_pio_err_status_cnt[17];
2871}
2872
2873static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2874 void *context, int vl, int mode,
2875 u64 data)
2876{
2877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878
2879 return dd->send_pio_err_status_cnt[16];
2880}
2881
2882static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2883 const struct cntr_entry *entry,
2884 void *context, int vl, int mode, u64 data)
2885{
2886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887
2888 return dd->send_pio_err_status_cnt[15];
2889}
2890
2891static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2892 const struct cntr_entry *entry,
2893 void *context, int vl, int mode, u64 data)
2894{
2895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896
2897 return dd->send_pio_err_status_cnt[14];
2898}
2899
2900static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2901 const struct cntr_entry *entry,
2902 void *context, int vl, int mode, u64 data)
2903{
2904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905
2906 return dd->send_pio_err_status_cnt[13];
2907}
2908
2909static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2910 const struct cntr_entry *entry,
2911 void *context, int vl, int mode, u64 data)
2912{
2913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914
2915 return dd->send_pio_err_status_cnt[12];
2916}
2917
2918static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2919 const struct cntr_entry *entry,
2920 void *context, int vl, int mode, u64 data)
2921{
2922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923
2924 return dd->send_pio_err_status_cnt[11];
2925}
2926
2927static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2928 const struct cntr_entry *entry,
2929 void *context, int vl, int mode, u64 data)
2930{
2931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932
2933 return dd->send_pio_err_status_cnt[10];
2934}
2935
2936static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2937 const struct cntr_entry *entry,
2938 void *context, int vl, int mode, u64 data)
2939{
2940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941
2942 return dd->send_pio_err_status_cnt[9];
2943}
2944
2945static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2946 const struct cntr_entry *entry,
2947 void *context, int vl, int mode, u64 data)
2948{
2949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950
2951 return dd->send_pio_err_status_cnt[8];
2952}
2953
2954static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2955 const struct cntr_entry *entry,
2956 void *context, int vl, int mode, u64 data)
2957{
2958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959
2960 return dd->send_pio_err_status_cnt[7];
2961}
2962
2963static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2964 void *context, int vl, int mode,
2965 u64 data)
2966{
2967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968
2969 return dd->send_pio_err_status_cnt[6];
2970}
2971
2972static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2973 void *context, int vl, int mode,
2974 u64 data)
2975{
2976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977
2978 return dd->send_pio_err_status_cnt[5];
2979}
2980
2981static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2982 void *context, int vl, int mode,
2983 u64 data)
2984{
2985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986
2987 return dd->send_pio_err_status_cnt[4];
2988}
2989
2990static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2991 void *context, int vl, int mode,
2992 u64 data)
2993{
2994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995
2996 return dd->send_pio_err_status_cnt[3];
2997}
2998
2999static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3000 void *context, int vl, int mode,
3001 u64 data)
3002{
3003 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004
3005 return dd->send_pio_err_status_cnt[2];
3006}
3007
3008static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3009 void *context, int vl,
3010 int mode, u64 data)
3011{
3012 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014 return dd->send_pio_err_status_cnt[1];
3015}
3016
3017static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3018 void *context, int vl, int mode,
3019 u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_pio_err_status_cnt[0];
3024}
3025
3026/*
3027 * Software counters corresponding to each of the
3028 * error status bits within SendDmaErrStatus
3029 */
3030static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3031 const struct cntr_entry *entry,
3032 void *context, int vl, int mode, u64 data)
3033{
3034 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3035
3036 return dd->send_dma_err_status_cnt[3];
3037}
3038
3039static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3040 const struct cntr_entry *entry,
3041 void *context, int vl, int mode, u64 data)
3042{
3043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3044
3045 return dd->send_dma_err_status_cnt[2];
3046}
3047
3048static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3049 void *context, int vl, int mode,
3050 u64 data)
3051{
3052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3053
3054 return dd->send_dma_err_status_cnt[1];
3055}
3056
3057static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3058 void *context, int vl, int mode,
3059 u64 data)
3060{
3061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3062
3063 return dd->send_dma_err_status_cnt[0];
3064}
3065
3066/*
3067 * Software counters corresponding to each of the
3068 * error status bits within SendEgressErrStatus
3069 */
3070static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3071 const struct cntr_entry *entry,
3072 void *context, int vl, int mode, u64 data)
3073{
3074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076 return dd->send_egress_err_status_cnt[63];
3077}
3078
3079static u64 access_tx_read_sdma_memory_csr_err_cnt(
3080 const struct cntr_entry *entry,
3081 void *context, int vl, int mode, u64 data)
3082{
3083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085 return dd->send_egress_err_status_cnt[62];
3086}
3087
3088static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3089 void *context, int vl, int mode,
3090 u64 data)
3091{
3092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094 return dd->send_egress_err_status_cnt[61];
3095}
3096
3097static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3098 void *context, int vl,
3099 int mode, u64 data)
3100{
3101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103 return dd->send_egress_err_status_cnt[60];
3104}
3105
3106static u64 access_tx_read_sdma_memory_cor_err_cnt(
3107 const struct cntr_entry *entry,
3108 void *context, int vl, int mode, u64 data)
3109{
3110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112 return dd->send_egress_err_status_cnt[59];
3113}
3114
3115static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3116 void *context, int vl, int mode,
3117 u64 data)
3118{
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121 return dd->send_egress_err_status_cnt[58];
3122}
3123
3124static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3125 void *context, int vl, int mode,
3126 u64 data)
3127{
3128 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3129
3130 return dd->send_egress_err_status_cnt[57];
3131}
3132
3133static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3134 void *context, int vl, int mode,
3135 u64 data)
3136{
3137 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3138
3139 return dd->send_egress_err_status_cnt[56];
3140}
3141
3142static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3143 void *context, int vl, int mode,
3144 u64 data)
3145{
3146 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3147
3148 return dd->send_egress_err_status_cnt[55];
3149}
3150
3151static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3152 void *context, int vl, int mode,
3153 u64 data)
3154{
3155 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3156
3157 return dd->send_egress_err_status_cnt[54];
3158}
3159
3160static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3161 void *context, int vl, int mode,
3162 u64 data)
3163{
3164 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3165
3166 return dd->send_egress_err_status_cnt[53];
3167}
3168
3169static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3170 void *context, int vl, int mode,
3171 u64 data)
3172{
3173 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3174
3175 return dd->send_egress_err_status_cnt[52];
3176}
3177
3178static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3179 void *context, int vl, int mode,
3180 u64 data)
3181{
3182 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183
3184 return dd->send_egress_err_status_cnt[51];
3185}
3186
3187static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3188 void *context, int vl, int mode,
3189 u64 data)
3190{
3191 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192
3193 return dd->send_egress_err_status_cnt[50];
3194}
3195
3196static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3197 void *context, int vl, int mode,
3198 u64 data)
3199{
3200 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201
3202 return dd->send_egress_err_status_cnt[49];
3203}
3204
3205static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3206 void *context, int vl, int mode,
3207 u64 data)
3208{
3209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210
3211 return dd->send_egress_err_status_cnt[48];
3212}
3213
3214static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3215 void *context, int vl, int mode,
3216 u64 data)
3217{
3218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219
3220 return dd->send_egress_err_status_cnt[47];
3221}
3222
3223static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3224 void *context, int vl, int mode,
3225 u64 data)
3226{
3227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228
3229 return dd->send_egress_err_status_cnt[46];
3230}
3231
3232static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3233 void *context, int vl, int mode,
3234 u64 data)
3235{
3236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237
3238 return dd->send_egress_err_status_cnt[45];
3239}
3240
3241static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3242 void *context, int vl,
3243 int mode, u64 data)
3244{
3245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246
3247 return dd->send_egress_err_status_cnt[44];
3248}
3249
3250static u64 access_tx_read_sdma_memory_unc_err_cnt(
3251 const struct cntr_entry *entry,
3252 void *context, int vl, int mode, u64 data)
3253{
3254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255
3256 return dd->send_egress_err_status_cnt[43];
3257}
3258
3259static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3260 void *context, int vl, int mode,
3261 u64 data)
3262{
3263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264
3265 return dd->send_egress_err_status_cnt[42];
3266}
3267
3268static u64 access_tx_credit_return_partiy_err_cnt(
3269 const struct cntr_entry *entry,
3270 void *context, int vl, int mode, u64 data)
3271{
3272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273
3274 return dd->send_egress_err_status_cnt[41];
3275}
3276
3277static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3278 const struct cntr_entry *entry,
3279 void *context, int vl, int mode, u64 data)
3280{
3281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282
3283 return dd->send_egress_err_status_cnt[40];
3284}
3285
3286static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3287 const struct cntr_entry *entry,
3288 void *context, int vl, int mode, u64 data)
3289{
3290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291
3292 return dd->send_egress_err_status_cnt[39];
3293}
3294
3295static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3296 const struct cntr_entry *entry,
3297 void *context, int vl, int mode, u64 data)
3298{
3299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300
3301 return dd->send_egress_err_status_cnt[38];
3302}
3303
3304static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3305 const struct cntr_entry *entry,
3306 void *context, int vl, int mode, u64 data)
3307{
3308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309
3310 return dd->send_egress_err_status_cnt[37];
3311}
3312
3313static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3314 const struct cntr_entry *entry,
3315 void *context, int vl, int mode, u64 data)
3316{
3317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318
3319 return dd->send_egress_err_status_cnt[36];
3320}
3321
3322static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3323 const struct cntr_entry *entry,
3324 void *context, int vl, int mode, u64 data)
3325{
3326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327
3328 return dd->send_egress_err_status_cnt[35];
3329}
3330
3331static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3332 const struct cntr_entry *entry,
3333 void *context, int vl, int mode, u64 data)
3334{
3335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336
3337 return dd->send_egress_err_status_cnt[34];
3338}
3339
3340static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3341 const struct cntr_entry *entry,
3342 void *context, int vl, int mode, u64 data)
3343{
3344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345
3346 return dd->send_egress_err_status_cnt[33];
3347}
3348
3349static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3350 const struct cntr_entry *entry,
3351 void *context, int vl, int mode, u64 data)
3352{
3353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354
3355 return dd->send_egress_err_status_cnt[32];
3356}
3357
3358static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3359 const struct cntr_entry *entry,
3360 void *context, int vl, int mode, u64 data)
3361{
3362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363
3364 return dd->send_egress_err_status_cnt[31];
3365}
3366
3367static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3368 const struct cntr_entry *entry,
3369 void *context, int vl, int mode, u64 data)
3370{
3371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372
3373 return dd->send_egress_err_status_cnt[30];
3374}
3375
3376static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3377 const struct cntr_entry *entry,
3378 void *context, int vl, int mode, u64 data)
3379{
3380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381
3382 return dd->send_egress_err_status_cnt[29];
3383}
3384
3385static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3386 const struct cntr_entry *entry,
3387 void *context, int vl, int mode, u64 data)
3388{
3389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390
3391 return dd->send_egress_err_status_cnt[28];
3392}
3393
3394static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3395 const struct cntr_entry *entry,
3396 void *context, int vl, int mode, u64 data)
3397{
3398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399
3400 return dd->send_egress_err_status_cnt[27];
3401}
3402
3403static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3404 const struct cntr_entry *entry,
3405 void *context, int vl, int mode, u64 data)
3406{
3407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408
3409 return dd->send_egress_err_status_cnt[26];
3410}
3411
3412static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3413 const struct cntr_entry *entry,
3414 void *context, int vl, int mode, u64 data)
3415{
3416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417
3418 return dd->send_egress_err_status_cnt[25];
3419}
3420
3421static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3422 const struct cntr_entry *entry,
3423 void *context, int vl, int mode, u64 data)
3424{
3425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426
3427 return dd->send_egress_err_status_cnt[24];
3428}
3429
3430static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3431 const struct cntr_entry *entry,
3432 void *context, int vl, int mode, u64 data)
3433{
3434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435
3436 return dd->send_egress_err_status_cnt[23];
3437}
3438
3439static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3440 const struct cntr_entry *entry,
3441 void *context, int vl, int mode, u64 data)
3442{
3443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444
3445 return dd->send_egress_err_status_cnt[22];
3446}
3447
3448static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3449 const struct cntr_entry *entry,
3450 void *context, int vl, int mode, u64 data)
3451{
3452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453
3454 return dd->send_egress_err_status_cnt[21];
3455}
3456
3457static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3458 const struct cntr_entry *entry,
3459 void *context, int vl, int mode, u64 data)
3460{
3461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462
3463 return dd->send_egress_err_status_cnt[20];
3464}
3465
3466static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3467 const struct cntr_entry *entry,
3468 void *context, int vl, int mode, u64 data)
3469{
3470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471
3472 return dd->send_egress_err_status_cnt[19];
3473}
3474
3475static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3476 const struct cntr_entry *entry,
3477 void *context, int vl, int mode, u64 data)
3478{
3479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480
3481 return dd->send_egress_err_status_cnt[18];
3482}
3483
3484static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3485 const struct cntr_entry *entry,
3486 void *context, int vl, int mode, u64 data)
3487{
3488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489
3490 return dd->send_egress_err_status_cnt[17];
3491}
3492
3493static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3494 const struct cntr_entry *entry,
3495 void *context, int vl, int mode, u64 data)
3496{
3497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498
3499 return dd->send_egress_err_status_cnt[16];
3500}
3501
3502static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3503 void *context, int vl, int mode,
3504 u64 data)
3505{
3506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507
3508 return dd->send_egress_err_status_cnt[15];
3509}
3510
3511static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3512 void *context, int vl,
3513 int mode, u64 data)
3514{
3515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516
3517 return dd->send_egress_err_status_cnt[14];
3518}
3519
3520static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3521 void *context, int vl, int mode,
3522 u64 data)
3523{
3524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525
3526 return dd->send_egress_err_status_cnt[13];
3527}
3528
3529static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3530 void *context, int vl, int mode,
3531 u64 data)
3532{
3533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534
3535 return dd->send_egress_err_status_cnt[12];
3536}
3537
3538static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3539 const struct cntr_entry *entry,
3540 void *context, int vl, int mode, u64 data)
3541{
3542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543
3544 return dd->send_egress_err_status_cnt[11];
3545}
3546
3547static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3548 void *context, int vl, int mode,
3549 u64 data)
3550{
3551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3552
3553 return dd->send_egress_err_status_cnt[10];
3554}
3555
3556static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3557 void *context, int vl, int mode,
3558 u64 data)
3559{
3560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3561
3562 return dd->send_egress_err_status_cnt[9];
3563}
3564
3565static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3566 const struct cntr_entry *entry,
3567 void *context, int vl, int mode, u64 data)
3568{
3569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3570
3571 return dd->send_egress_err_status_cnt[8];
3572}
3573
3574static u64 access_tx_pio_launch_intf_parity_err_cnt(
3575 const struct cntr_entry *entry,
3576 void *context, int vl, int mode, u64 data)
3577{
3578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3579
3580 return dd->send_egress_err_status_cnt[7];
3581}
3582
3583static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3584 void *context, int vl, int mode,
3585 u64 data)
3586{
3587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3588
3589 return dd->send_egress_err_status_cnt[6];
3590}
3591
3592static u64 access_tx_incorrect_link_state_err_cnt(
3593 const struct cntr_entry *entry,
3594 void *context, int vl, int mode, u64 data)
3595{
3596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3597
3598 return dd->send_egress_err_status_cnt[5];
3599}
3600
3601static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3602 void *context, int vl, int mode,
3603 u64 data)
3604{
3605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3606
3607 return dd->send_egress_err_status_cnt[4];
3608}
3609
3610static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3611 const struct cntr_entry *entry,
3612 void *context, int vl, int mode, u64 data)
3613{
3614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3615
3616 return dd->send_egress_err_status_cnt[3];
3617}
3618
3619static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3620 void *context, int vl, int mode,
3621 u64 data)
3622{
3623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625 return dd->send_egress_err_status_cnt[2];
3626}
3627
3628static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3629 const struct cntr_entry *entry,
3630 void *context, int vl, int mode, u64 data)
3631{
3632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634 return dd->send_egress_err_status_cnt[1];
3635}
3636
3637static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_egress_err_status_cnt[0];
3644}
3645
3646/*
3647 * Software counters corresponding to each of the
3648 * error status bits within SendErrStatus
3649 */
3650static u64 access_send_csr_write_bad_addr_err_cnt(
3651 const struct cntr_entry *entry,
3652 void *context, int vl, int mode, u64 data)
3653{
3654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3655
3656 return dd->send_err_status_cnt[2];
3657}
3658
3659static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3660 void *context, int vl,
3661 int mode, u64 data)
3662{
3663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3664
3665 return dd->send_err_status_cnt[1];
3666}
3667
3668static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3669 void *context, int vl, int mode,
3670 u64 data)
3671{
3672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674 return dd->send_err_status_cnt[0];
3675}
3676
3677/*
3678 * Software counters corresponding to each of the
3679 * error status bits within SendCtxtErrStatus
3680 */
3681static u64 access_pio_write_out_of_bounds_err_cnt(
3682 const struct cntr_entry *entry,
3683 void *context, int vl, int mode, u64 data)
3684{
3685 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687 return dd->sw_ctxt_err_status_cnt[4];
3688}
3689
3690static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3691 void *context, int vl, int mode,
3692 u64 data)
3693{
3694 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696 return dd->sw_ctxt_err_status_cnt[3];
3697}
3698
3699static u64 access_pio_write_crosses_boundary_err_cnt(
3700 const struct cntr_entry *entry,
3701 void *context, int vl, int mode, u64 data)
3702{
3703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705 return dd->sw_ctxt_err_status_cnt[2];
3706}
3707
3708static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3709 void *context, int vl,
3710 int mode, u64 data)
3711{
3712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714 return dd->sw_ctxt_err_status_cnt[1];
3715}
3716
3717static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3718 void *context, int vl, int mode,
3719 u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->sw_ctxt_err_status_cnt[0];
3724}
3725
3726/*
3727 * Software counters corresponding to each of the
3728 * error status bits within SendDmaEngErrStatus
3729 */
3730static u64 access_sdma_header_request_fifo_cor_err_cnt(
3731 const struct cntr_entry *entry,
3732 void *context, int vl, int mode, u64 data)
3733{
3734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3735
3736 return dd->sw_send_dma_eng_err_status_cnt[23];
3737}
3738
3739static u64 access_sdma_header_storage_cor_err_cnt(
3740 const struct cntr_entry *entry,
3741 void *context, int vl, int mode, u64 data)
3742{
3743 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3744
3745 return dd->sw_send_dma_eng_err_status_cnt[22];
3746}
3747
3748static u64 access_sdma_packet_tracking_cor_err_cnt(
3749 const struct cntr_entry *entry,
3750 void *context, int vl, int mode, u64 data)
3751{
3752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754 return dd->sw_send_dma_eng_err_status_cnt[21];
3755}
3756
3757static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3758 void *context, int vl, int mode,
3759 u64 data)
3760{
3761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763 return dd->sw_send_dma_eng_err_status_cnt[20];
3764}
3765
3766static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3767 void *context, int vl, int mode,
3768 u64 data)
3769{
3770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772 return dd->sw_send_dma_eng_err_status_cnt[19];
3773}
3774
3775static u64 access_sdma_header_request_fifo_unc_err_cnt(
3776 const struct cntr_entry *entry,
3777 void *context, int vl, int mode, u64 data)
3778{
3779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3780
3781 return dd->sw_send_dma_eng_err_status_cnt[18];
3782}
3783
3784static u64 access_sdma_header_storage_unc_err_cnt(
3785 const struct cntr_entry *entry,
3786 void *context, int vl, int mode, u64 data)
3787{
3788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3789
3790 return dd->sw_send_dma_eng_err_status_cnt[17];
3791}
3792
3793static u64 access_sdma_packet_tracking_unc_err_cnt(
3794 const struct cntr_entry *entry,
3795 void *context, int vl, int mode, u64 data)
3796{
3797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3798
3799 return dd->sw_send_dma_eng_err_status_cnt[16];
3800}
3801
3802static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3803 void *context, int vl, int mode,
3804 u64 data)
3805{
3806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3807
3808 return dd->sw_send_dma_eng_err_status_cnt[15];
3809}
3810
3811static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3812 void *context, int vl, int mode,
3813 u64 data)
3814{
3815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3816
3817 return dd->sw_send_dma_eng_err_status_cnt[14];
3818}
3819
3820static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3821 void *context, int vl, int mode,
3822 u64 data)
3823{
3824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3825
3826 return dd->sw_send_dma_eng_err_status_cnt[13];
3827}
3828
3829static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3830 void *context, int vl, int mode,
3831 u64 data)
3832{
3833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3834
3835 return dd->sw_send_dma_eng_err_status_cnt[12];
3836}
3837
3838static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3839 void *context, int vl, int mode,
3840 u64 data)
3841{
3842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844 return dd->sw_send_dma_eng_err_status_cnt[11];
3845}
3846
3847static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3848 void *context, int vl, int mode,
3849 u64 data)
3850{
3851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853 return dd->sw_send_dma_eng_err_status_cnt[10];
3854}
3855
3856static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3857 void *context, int vl, int mode,
3858 u64 data)
3859{
3860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862 return dd->sw_send_dma_eng_err_status_cnt[9];
3863}
3864
3865static u64 access_sdma_packet_desc_overflow_err_cnt(
3866 const struct cntr_entry *entry,
3867 void *context, int vl, int mode, u64 data)
3868{
3869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871 return dd->sw_send_dma_eng_err_status_cnt[8];
3872}
3873
3874static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3875 void *context, int vl,
3876 int mode, u64 data)
3877{
3878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880 return dd->sw_send_dma_eng_err_status_cnt[7];
3881}
3882
3883static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3884 void *context, int vl, int mode, u64 data)
3885{
3886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888 return dd->sw_send_dma_eng_err_status_cnt[6];
3889}
3890
3891static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3892 void *context, int vl, int mode,
3893 u64 data)
3894{
3895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897 return dd->sw_send_dma_eng_err_status_cnt[5];
3898}
3899
3900static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3901 void *context, int vl, int mode,
3902 u64 data)
3903{
3904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906 return dd->sw_send_dma_eng_err_status_cnt[4];
3907}
3908
3909static u64 access_sdma_tail_out_of_bounds_err_cnt(
3910 const struct cntr_entry *entry,
3911 void *context, int vl, int mode, u64 data)
3912{
3913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915 return dd->sw_send_dma_eng_err_status_cnt[3];
3916}
3917
3918static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3919 void *context, int vl, int mode,
3920 u64 data)
3921{
3922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924 return dd->sw_send_dma_eng_err_status_cnt[2];
3925}
3926
3927static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3928 void *context, int vl, int mode,
3929 u64 data)
3930{
3931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933 return dd->sw_send_dma_eng_err_status_cnt[1];
3934}
3935
3936static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3937 void *context, int vl, int mode,
3938 u64 data)
3939{
3940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942 return dd->sw_send_dma_eng_err_status_cnt[0];
3943}
3944
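/*
 * def_access_sw_cpu(cntr) expands to an accessor for a per-CPU ibport
 * counter: it passes the zero-reference value z_<cntr> and the live
 * per-CPU counter <cntr> from ppd->ibport_data.rvp to read_write_cpu()
 * for the requested VL and mode.
 */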
3945#define def_access_sw_cpu(cntr) \
3946static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3947 void *context, int vl, int mode, u64 data) \
3948{ \
3949 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3950	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3951 ppd->ibport_data.rvp.cntr, vl, \
3952			      mode, data); \
3953}
3954
3955def_access_sw_cpu(rc_acks);
3956def_access_sw_cpu(rc_qacks);
3957def_access_sw_cpu(rc_delayed_comp);
3958
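/*
 * def_access_ibp_counter(cntr) expands to an accessor for the software
 * ibport counter n_<cntr>.  These counters are not kept per VL, so a
 * per-VL query returns 0; otherwise the value is read or written
 * through read_write_sw().
 */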
3959#define def_access_ibp_counter(cntr) \
3960static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3961 void *context, int vl, int mode, u64 data) \
3962{ \
3963 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3964 \
3965 if (vl != CNTR_INVALID_VL) \
3966 return 0; \
3967 \
3968	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3969			     mode, data); \
3970}
3971
3972def_access_ibp_counter(loop_pkts);
3973def_access_ibp_counter(rc_resends);
3974def_access_ibp_counter(rnr_naks);
3975def_access_ibp_counter(other_naks);
3976def_access_ibp_counter(rc_timeouts);
3977def_access_ibp_counter(pkt_drops);
3978def_access_ibp_counter(dmawait);
3979def_access_ibp_counter(rc_seqnak);
3980def_access_ibp_counter(rc_dupreq);
3981def_access_ibp_counter(rdma_seq);
3982def_access_ibp_counter(unaligned);
3983def_access_ibp_counter(seq_naks);
3984
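/*
 * Device-wide counter table, indexed by the C_* device counter
 * enumerators.  Each entry (CNTR_ELEM or one of the RXE32/CCE/DC
 * wrappers around it) associates a counter name, its CSR location
 * where applicable, CNTR_* flags, and the routine used to access it.
 */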
3985static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3986[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3987[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3988 CNTR_NORMAL),
3989[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3990 CNTR_NORMAL),
3991[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3992 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3993 CNTR_NORMAL),
3994[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3995 CNTR_NORMAL),
3996[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3997 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3998[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3999 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4000[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4001 CNTR_NORMAL),
4002[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4003 CNTR_NORMAL),
4004[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4005 CNTR_NORMAL),
4006[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4013 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4014[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4015 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4016[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4017 CNTR_SYNTH),
4018[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4019[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4020 CNTR_SYNTH),
4021[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4022 CNTR_SYNTH),
4023[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4024 CNTR_SYNTH),
4025[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4026 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4027[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4028 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4029 CNTR_SYNTH),
4030[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4031 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4032[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4033 CNTR_SYNTH),
4034[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4035 CNTR_SYNTH),
4036[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4037 CNTR_SYNTH),
4038[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4039 CNTR_SYNTH),
4040[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4043 CNTR_SYNTH),
4044[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4045 CNTR_SYNTH),
4046[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4047 CNTR_SYNTH | CNTR_VL),
4048[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4049 CNTR_SYNTH | CNTR_VL),
4050[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4051[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4052 CNTR_SYNTH | CNTR_VL),
4053[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4054[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4055 CNTR_SYNTH | CNTR_VL),
4056[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4057 CNTR_SYNTH),
4058[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4059 CNTR_SYNTH | CNTR_VL),
4060[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4061 CNTR_SYNTH),
4062[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4063 CNTR_SYNTH | CNTR_VL),
4064[C_DC_TOTAL_CRC] =
4065 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4066 CNTR_SYNTH),
4067[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4068 CNTR_SYNTH),
4069[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4070 CNTR_SYNTH),
4071[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4074 CNTR_SYNTH),
4075[C_DC_CRC_MULT_LN] =
4076 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4077 CNTR_SYNTH),
4078[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4079 CNTR_SYNTH),
4080[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4081 CNTR_SYNTH),
4082[C_DC_SEQ_CRC_CNT] =
4083 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4084 CNTR_SYNTH),
4085[C_DC_ESC0_ONLY_CNT] =
4086 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4087 CNTR_SYNTH),
4088[C_DC_ESC0_PLUS1_CNT] =
4089 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4090 CNTR_SYNTH),
4091[C_DC_ESC0_PLUS2_CNT] =
4092 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4093 CNTR_SYNTH),
4094[C_DC_REINIT_FROM_PEER_CNT] =
4095 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4096 CNTR_SYNTH),
4097[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4098 CNTR_SYNTH),
4099[C_DC_MISC_FLG_CNT] =
4100 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4101 CNTR_SYNTH),
4102[C_DC_PRF_GOOD_LTP_CNT] =
4103 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4104[C_DC_PRF_ACCEPTED_LTP_CNT] =
4105 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4106 CNTR_SYNTH),
4107[C_DC_PRF_RX_FLIT_CNT] =
4108 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4109[C_DC_PRF_TX_FLIT_CNT] =
4110 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4111[C_DC_PRF_CLK_CNTR] =
4112 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4113[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4114 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4115[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4116 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4117 CNTR_SYNTH),
4118[C_DC_PG_STS_TX_SBE_CNT] =
4119 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4120[C_DC_PG_STS_TX_MBE_CNT] =
4121 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4122 CNTR_SYNTH),
4123[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4124 access_sw_cpu_intr),
4125[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4126 access_sw_cpu_rcv_limit),
4127[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4128 access_sw_vtx_wait),
4129[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4130 access_sw_pio_wait),
4131[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4132 access_sw_kmem_wait),
4133[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4134 access_sw_send_schedule),
4135[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4136 SEND_DMA_DESC_FETCHED_CNT, 0,
4137 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4138 dev_access_u32_csr),
4139[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4140 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4141 access_sde_int_cnt),
4142[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4143 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4144 access_sde_err_cnt),
4145[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4146 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4147 access_sde_idle_int_cnt),
4148[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4149 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4150 access_sde_progress_int_cnt),
4151/* MISC_ERR_STATUS */
4152[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4153 CNTR_NORMAL,
4154 access_misc_pll_lock_fail_err_cnt),
4155[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4156 CNTR_NORMAL,
4157 access_misc_mbist_fail_err_cnt),
4158[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4159 CNTR_NORMAL,
4160 access_misc_invalid_eep_cmd_err_cnt),
4161[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4162 CNTR_NORMAL,
4163 access_misc_efuse_done_parity_err_cnt),
4164[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4165 CNTR_NORMAL,
4166 access_misc_efuse_write_err_cnt),
4167[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4168 0, CNTR_NORMAL,
4169 access_misc_efuse_read_bad_addr_err_cnt),
4170[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4171 CNTR_NORMAL,
4172 access_misc_efuse_csr_parity_err_cnt),
4173[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4174 CNTR_NORMAL,
4175 access_misc_fw_auth_failed_err_cnt),
4176[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4177 CNTR_NORMAL,
4178 access_misc_key_mismatch_err_cnt),
4179[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4180 CNTR_NORMAL,
4181 access_misc_sbus_write_failed_err_cnt),
4182[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4183 CNTR_NORMAL,
4184 access_misc_csr_write_bad_addr_err_cnt),
4185[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4186 CNTR_NORMAL,
4187 access_misc_csr_read_bad_addr_err_cnt),
4188[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4189 CNTR_NORMAL,
4190 access_misc_csr_parity_err_cnt),
4191/* CceErrStatus */
4192[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4193 CNTR_NORMAL,
4194 access_sw_cce_err_status_aggregated_cnt),
4195[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4196 CNTR_NORMAL,
4197 access_cce_msix_csr_parity_err_cnt),
4198[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4199 CNTR_NORMAL,
4200 access_cce_int_map_unc_err_cnt),
4201[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4202 CNTR_NORMAL,
4203 access_cce_int_map_cor_err_cnt),
4204[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4205 CNTR_NORMAL,
4206 access_cce_msix_table_unc_err_cnt),
4207[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4208 CNTR_NORMAL,
4209 access_cce_msix_table_cor_err_cnt),
4210[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4211 0, CNTR_NORMAL,
4212 access_cce_rxdma_conv_fifo_parity_err_cnt),
4213[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4214 0, CNTR_NORMAL,
4215 access_cce_rcpl_async_fifo_parity_err_cnt),
4216[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4217 CNTR_NORMAL,
4218 access_cce_seg_write_bad_addr_err_cnt),
4219[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4220 CNTR_NORMAL,
4221 access_cce_seg_read_bad_addr_err_cnt),
4222[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4223 CNTR_NORMAL,
4224 access_la_triggered_cnt),
4225[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4226 CNTR_NORMAL,
4227 access_cce_trgt_cpl_timeout_err_cnt),
4228[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4229 CNTR_NORMAL,
4230 access_pcic_receive_parity_err_cnt),
4231[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_pcic_transmit_back_parity_err_cnt),
4234[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4235 0, CNTR_NORMAL,
4236 access_pcic_transmit_front_parity_err_cnt),
4237[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_pcic_cpl_dat_q_unc_err_cnt),
4240[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4241 CNTR_NORMAL,
4242 access_pcic_cpl_hd_q_unc_err_cnt),
4243[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4244 CNTR_NORMAL,
4245 access_pcic_post_dat_q_unc_err_cnt),
4246[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4247 CNTR_NORMAL,
4248 access_pcic_post_hd_q_unc_err_cnt),
4249[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4250 CNTR_NORMAL,
4251 access_pcic_retry_sot_mem_unc_err_cnt),
4252[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4253 CNTR_NORMAL,
4254 access_pcic_retry_mem_unc_err),
4255[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4256 CNTR_NORMAL,
4257 access_pcic_n_post_dat_q_parity_err_cnt),
4258[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_pcic_n_post_h_q_parity_err_cnt),
4261[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4262 CNTR_NORMAL,
4263 access_pcic_cpl_dat_q_cor_err_cnt),
4264[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_pcic_cpl_hd_q_cor_err_cnt),
4267[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4268 CNTR_NORMAL,
4269 access_pcic_post_dat_q_cor_err_cnt),
4270[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_post_hd_q_cor_err_cnt),
4273[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4274 CNTR_NORMAL,
4275 access_pcic_retry_sot_mem_cor_err_cnt),
4276[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_retry_mem_cor_err_cnt),
4279[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4280 "CceCli1AsyncFifoDbgParityError", 0, 0,
4281 CNTR_NORMAL,
4282 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4283[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4284 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4285 CNTR_NORMAL,
4286 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4287 ),
4288[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4289 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4290 CNTR_NORMAL,
4291 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4292[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4293 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4294 CNTR_NORMAL,
4295 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4296[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4297 0, CNTR_NORMAL,
4298 access_cce_cli2_async_fifo_parity_err_cnt),
4299[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_cce_csr_cfg_bus_parity_err_cnt),
4302[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4303 0, CNTR_NORMAL,
4304 access_cce_cli0_async_fifo_parity_err_cnt),
4305[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4306 CNTR_NORMAL,
4307 access_cce_rspd_data_parity_err_cnt),
4308[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4309 CNTR_NORMAL,
4310 access_cce_trgt_access_err_cnt),
4311[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4312 0, CNTR_NORMAL,
4313 access_cce_trgt_async_fifo_parity_err_cnt),
4314[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_cce_csr_write_bad_addr_err_cnt),
4317[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4318 CNTR_NORMAL,
4319 access_cce_csr_read_bad_addr_err_cnt),
4320[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_ccs_csr_parity_err_cnt),
4323
4324/* RcvErrStatus */
4325[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_rx_csr_parity_err_cnt),
4328[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_rx_csr_write_bad_addr_err_cnt),
4331[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_rx_csr_read_bad_addr_err_cnt),
4334[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_rx_dma_csr_unc_err_cnt),
4337[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_rx_dma_dq_fsm_encoding_err_cnt),
4340[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4341 CNTR_NORMAL,
4342 access_rx_dma_eq_fsm_encoding_err_cnt),
4343[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_rx_dma_csr_parity_err_cnt),
4346[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_rx_rbuf_data_cor_err_cnt),
4349[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4350 CNTR_NORMAL,
4351 access_rx_rbuf_data_unc_err_cnt),
4352[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4353 CNTR_NORMAL,
4354 access_rx_dma_data_fifo_rd_cor_err_cnt),
4355[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4356 CNTR_NORMAL,
4357 access_rx_dma_data_fifo_rd_unc_err_cnt),
4358[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4359 CNTR_NORMAL,
4360 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4361[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4362 CNTR_NORMAL,
4363 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4364[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_rbuf_desc_part2_cor_err_cnt),
4367[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_rbuf_desc_part2_unc_err_cnt),
4370[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_rbuf_desc_part1_cor_err_cnt),
4373[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_rbuf_desc_part1_unc_err_cnt),
4376[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_hq_intr_fsm_err_cnt),
4379[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_hq_intr_csr_parity_err_cnt),
4382[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_lookup_csr_parity_err_cnt),
4385[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_lookup_rcv_array_cor_err_cnt),
4388[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_lookup_rcv_array_unc_err_cnt),
4391[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4392 0, CNTR_NORMAL,
4393 access_rx_lookup_des_part2_parity_err_cnt),
4394[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4395 0, CNTR_NORMAL,
4396 access_rx_lookup_des_part1_unc_cor_err_cnt),
4397[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4398 CNTR_NORMAL,
4399 access_rx_lookup_des_part1_unc_err_cnt),
4400[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4401 CNTR_NORMAL,
4402 access_rx_rbuf_next_free_buf_cor_err_cnt),
4403[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_rbuf_next_free_buf_unc_err_cnt),
4406[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4407 "RxRbufFlInitWrAddrParityErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4410[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4411 0, CNTR_NORMAL,
4412 access_rx_rbuf_fl_initdone_parity_err_cnt),
4413[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4414 0, CNTR_NORMAL,
4415 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4416[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4417 CNTR_NORMAL,
4418 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4419[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4420 CNTR_NORMAL,
4421 access_rx_rbuf_empty_err_cnt),
4422[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_full_err_cnt),
4425[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rbuf_bad_lookup_err_cnt),
4428[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_rbuf_ctx_id_parity_err_cnt),
4431[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rbuf_csr_qeopdw_parity_err_cnt),
4434[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4435 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4436 CNTR_NORMAL,
4437 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4438[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4439 "RxRbufCsrQTlPtrParityErr", 0, 0,
4440 CNTR_NORMAL,
4441 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4442[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4443 0, CNTR_NORMAL,
4444 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4445[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4446 0, CNTR_NORMAL,
4447 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4448[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4449 0, 0, CNTR_NORMAL,
4450 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4451[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4452 0, CNTR_NORMAL,
4453 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4454[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4455 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4458[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4459 0, CNTR_NORMAL,
4460 access_rx_rbuf_block_list_read_cor_err_cnt),
4461[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4462 0, CNTR_NORMAL,
4463 access_rx_rbuf_block_list_read_unc_err_cnt),
4464[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rx_rbuf_lookup_des_cor_err_cnt),
4467[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rx_rbuf_lookup_des_unc_err_cnt),
4470[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4471 "RxRbufLookupDesRegUncCorErr", 0, 0,
4472 CNTR_NORMAL,
4473 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4474[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4475 CNTR_NORMAL,
4476 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4477[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rx_rbuf_free_list_cor_err_cnt),
4480[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4481 CNTR_NORMAL,
4482 access_rx_rbuf_free_list_unc_err_cnt),
4483[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4484 CNTR_NORMAL,
4485 access_rx_rcv_fsm_encoding_err_cnt),
4486[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4487 CNTR_NORMAL,
4488 access_rx_dma_flag_cor_err_cnt),
4489[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_dma_flag_unc_err_cnt),
4492[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_dc_sop_eop_parity_err_cnt),
4495[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4496 CNTR_NORMAL,
4497 access_rx_rcv_csr_parity_err_cnt),
4498[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rx_rcv_qp_map_table_cor_err_cnt),
4501[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rcv_qp_map_table_unc_err_cnt),
4504[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rcv_data_cor_err_cnt),
4507[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4508 CNTR_NORMAL,
4509 access_rx_rcv_data_unc_err_cnt),
4510[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rcv_hdr_cor_err_cnt),
4513[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rcv_hdr_unc_err_cnt),
4516[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_dc_intf_parity_err_cnt),
4519[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_dma_csr_cor_err_cnt),
4522/* SendPioErrStatus */
4523[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4524 CNTR_NORMAL,
4525 access_pio_pec_sop_head_parity_err_cnt),
4526[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_pio_pcc_sop_head_parity_err_cnt),
4529[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4530 0, 0, CNTR_NORMAL,
4531 access_pio_last_returned_cnt_parity_err_cnt),
4532[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4533 0, CNTR_NORMAL,
4534 access_pio_current_free_cnt_parity_err_cnt),
4535[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4536 CNTR_NORMAL,
4537 access_pio_reserved_31_err_cnt),
4538[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4539 CNTR_NORMAL,
4540 access_pio_reserved_30_err_cnt),
4541[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4542 CNTR_NORMAL,
4543 access_pio_ppmc_sop_len_err_cnt),
4544[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4545 CNTR_NORMAL,
4546 access_pio_ppmc_bqc_mem_parity_err_cnt),
4547[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_pio_vl_fifo_parity_err_cnt),
4550[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_pio_vlf_sop_parity_err_cnt),
4553[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_pio_vlf_v1_len_parity_err_cnt),
4556[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_pio_block_qw_count_parity_err_cnt),
4559[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_pio_write_qw_valid_parity_err_cnt),
4562[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_pio_state_machine_err_cnt),
4565[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_pio_write_data_parity_err_cnt),
4568[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_pio_host_addr_mem_cor_err_cnt),
4571[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4572 CNTR_NORMAL,
4573 access_pio_host_addr_mem_unc_err_cnt),
4574[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4577[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_init_sm_in_err_cnt),
4580[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_ppmc_pbl_fifo_err_cnt),
4583[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4584 0, CNTR_NORMAL,
4585 access_pio_credit_ret_fifo_parity_err_cnt),
4586[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_v1_len_mem_bank1_cor_err_cnt),
4589[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4590 CNTR_NORMAL,
4591 access_pio_v1_len_mem_bank0_cor_err_cnt),
4592[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_v1_len_mem_bank1_unc_err_cnt),
4595[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_v1_len_mem_bank0_unc_err_cnt),
4598[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_sm_pkt_reset_parity_err_cnt),
4601[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_pkt_evict_fifo_parity_err_cnt),
4604[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4605 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4608[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_sbrdctl_crrel_parity_err_cnt),
4611[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_pec_fifo_parity_err_cnt),
4614[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_pcc_fifo_parity_err_cnt),
4617[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_sb_mem_fifo1_err_cnt),
4620[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_sb_mem_fifo0_err_cnt),
4623[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_csr_parity_err_cnt),
4626[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_write_addr_parity_err_cnt),
4629[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_write_bad_ctxt_err_cnt),
4632/* SendDmaErrStatus */
4633[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4634 0, CNTR_NORMAL,
4635 access_sdma_pcie_req_tracking_cor_err_cnt),
4636[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4637 0, CNTR_NORMAL,
4638 access_sdma_pcie_req_tracking_unc_err_cnt),
4639[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_sdma_csr_parity_err_cnt),
4642[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_sdma_rpy_tag_err_cnt),
4645/* SendEgressErrStatus */
4646[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4647 CNTR_NORMAL,
4648 access_tx_read_pio_memory_csr_unc_err_cnt),
4649[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4650 0, CNTR_NORMAL,
4651 access_tx_read_sdma_memory_csr_err_cnt),
4652[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4653 CNTR_NORMAL,
4654 access_tx_egress_fifo_cor_err_cnt),
4655[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4656 CNTR_NORMAL,
4657 access_tx_read_pio_memory_cor_err_cnt),
4658[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4659 CNTR_NORMAL,
4660 access_tx_read_sdma_memory_cor_err_cnt),
4661[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4662 CNTR_NORMAL,
4663 access_tx_sb_hdr_cor_err_cnt),
4664[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4665 CNTR_NORMAL,
4666 access_tx_credit_overrun_err_cnt),
4667[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4668 CNTR_NORMAL,
4669 access_tx_launch_fifo8_cor_err_cnt),
4670[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4671 CNTR_NORMAL,
4672 access_tx_launch_fifo7_cor_err_cnt),
4673[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4674 CNTR_NORMAL,
4675 access_tx_launch_fifo6_cor_err_cnt),
4676[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_tx_launch_fifo5_cor_err_cnt),
4679[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_tx_launch_fifo4_cor_err_cnt),
4682[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4683 CNTR_NORMAL,
4684 access_tx_launch_fifo3_cor_err_cnt),
4685[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_launch_fifo2_cor_err_cnt),
4688[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4689 CNTR_NORMAL,
4690 access_tx_launch_fifo1_cor_err_cnt),
4691[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_launch_fifo0_cor_err_cnt),
4694[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_credit_return_vl_err_cnt),
4697[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_hcrc_insertion_err_cnt),
4700[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_egress_fifo_unc_err_cnt),
4703[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_read_pio_memory_unc_err_cnt),
4706[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_read_sdma_memory_unc_err_cnt),
4709[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_sb_hdr_unc_err_cnt),
4712[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_credit_return_partiy_err_cnt),
4715[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4716 0, 0, CNTR_NORMAL,
4717 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4718[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4719 0, 0, CNTR_NORMAL,
4720 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4721[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4722 0, 0, CNTR_NORMAL,
4723 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4724[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4725 0, 0, CNTR_NORMAL,
4726 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4727[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4728 0, 0, CNTR_NORMAL,
4729 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4730[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4731 0, 0, CNTR_NORMAL,
4732 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4733[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4734 0, 0, CNTR_NORMAL,
4735 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4736[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4737 0, 0, CNTR_NORMAL,
4738 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4739[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4740 0, 0, CNTR_NORMAL,
4741 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4742[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4743 0, 0, CNTR_NORMAL,
4744 access_tx_sdma15_disallowed_packet_err_cnt),
4745[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4746 0, 0, CNTR_NORMAL,
4747 access_tx_sdma14_disallowed_packet_err_cnt),
4748[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4749 0, 0, CNTR_NORMAL,
4750 access_tx_sdma13_disallowed_packet_err_cnt),
4751[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4752 0, 0, CNTR_NORMAL,
4753 access_tx_sdma12_disallowed_packet_err_cnt),
4754[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_sdma11_disallowed_packet_err_cnt),
4757[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_sdma10_disallowed_packet_err_cnt),
4760[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_sdma9_disallowed_packet_err_cnt),
4763[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_sdma8_disallowed_packet_err_cnt),
4766[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_sdma7_disallowed_packet_err_cnt),
4769[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_sdma6_disallowed_packet_err_cnt),
4772[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_sdma5_disallowed_packet_err_cnt),
4775[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_sdma4_disallowed_packet_err_cnt),
4778[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_sdma3_disallowed_packet_err_cnt),
4781[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma2_disallowed_packet_err_cnt),
4784[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma1_disallowed_packet_err_cnt),
4787[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma0_disallowed_packet_err_cnt),
4790[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_tx_config_parity_err_cnt),
4793[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4794 CNTR_NORMAL,
4795 access_tx_sbrd_ctl_csr_parity_err_cnt),
4796[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_tx_launch_csr_parity_err_cnt),
4799[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_tx_illegal_vl_err_cnt),
4802[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4803 "TxSbrdCtlStateMachineParityErr", 0, 0,
4804 CNTR_NORMAL,
4805 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4806[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4807 CNTR_NORMAL,
4808 access_egress_reserved_10_err_cnt),
4809[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4810 CNTR_NORMAL,
4811 access_egress_reserved_9_err_cnt),
4812[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4813 0, 0, CNTR_NORMAL,
4814 access_tx_sdma_launch_intf_parity_err_cnt),
4815[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4816 CNTR_NORMAL,
4817 access_tx_pio_launch_intf_parity_err_cnt),
4818[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4819 CNTR_NORMAL,
4820 access_egress_reserved_6_err_cnt),
4821[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_incorrect_link_state_err_cnt),
4824[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4825 CNTR_NORMAL,
4826 access_tx_linkdown_err_cnt),
4827[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4828 "EgressFifoUnderrunOrParityErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4831[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4832 CNTR_NORMAL,
4833 access_egress_reserved_2_err_cnt),
4834[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_tx_pkt_integrity_mem_unc_err_cnt),
4837[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4838 CNTR_NORMAL,
4839 access_tx_pkt_integrity_mem_cor_err_cnt),
4840/* SendErrStatus */
4841[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_send_csr_write_bad_addr_err_cnt),
4844[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4845 CNTR_NORMAL,
4846 access_send_csr_read_bad_addr_err_cnt),
4847[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_send_csr_parity_cnt),
4850/* SendCtxtErrStatus */
4851[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4852 CNTR_NORMAL,
4853 access_pio_write_out_of_bounds_err_cnt),
4854[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4855 CNTR_NORMAL,
4856 access_pio_write_overflow_err_cnt),
4857[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4858 0, 0, CNTR_NORMAL,
4859 access_pio_write_crosses_boundary_err_cnt),
4860[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_pio_disallowed_packet_err_cnt),
4863[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4864 CNTR_NORMAL,
4865 access_pio_inconsistent_sop_err_cnt),
4866/* SendDmaEngErrStatus */
4867[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4868 0, 0, CNTR_NORMAL,
4869 access_sdma_header_request_fifo_cor_err_cnt),
4870[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4871 CNTR_NORMAL,
4872 access_sdma_header_storage_cor_err_cnt),
4873[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4874 CNTR_NORMAL,
4875 access_sdma_packet_tracking_cor_err_cnt),
4876[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4877 CNTR_NORMAL,
4878 access_sdma_assembly_cor_err_cnt),
4879[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4880 CNTR_NORMAL,
4881 access_sdma_desc_table_cor_err_cnt),
4882[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4883 0, 0, CNTR_NORMAL,
4884 access_sdma_header_request_fifo_unc_err_cnt),
4885[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_sdma_header_storage_unc_err_cnt),
4888[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4889 CNTR_NORMAL,
4890 access_sdma_packet_tracking_unc_err_cnt),
4891[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_sdma_assembly_unc_err_cnt),
4894[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_sdma_desc_table_unc_err_cnt),
4897[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_sdma_timeout_err_cnt),
4900[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4901 CNTR_NORMAL,
4902 access_sdma_header_length_err_cnt),
4903[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4904 CNTR_NORMAL,
4905 access_sdma_header_address_err_cnt),
4906[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4907 CNTR_NORMAL,
4908 access_sdma_header_select_err_cnt),
4909[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_reserved_9_err_cnt),
4912[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_packet_desc_overflow_err_cnt),
4915[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4916 CNTR_NORMAL,
4917 access_sdma_length_mismatch_err_cnt),
4918[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_halt_err_cnt),
4921[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_sdma_mem_read_err_cnt),
4924[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_first_desc_err_cnt),
4927[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_tail_out_of_bounds_err_cnt),
4930[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_too_long_err_cnt),
4933[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_gen_mismatch_err_cnt),
4936[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004939};
4940
4941static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4942[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4943 CNTR_NORMAL),
4944[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4945 CNTR_NORMAL),
4946[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4947 CNTR_NORMAL),
4948[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4949 CNTR_NORMAL),
4950[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4951 CNTR_NORMAL),
4952[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4953 CNTR_NORMAL),
4954[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4955 CNTR_NORMAL),
4956[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4957[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4958[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4959[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4960 CNTR_SYNTH | CNTR_VL),
4961[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4962 CNTR_SYNTH | CNTR_VL),
4963[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4964 CNTR_SYNTH | CNTR_VL),
4965[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4966[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4967[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4968 access_sw_link_dn_cnt),
4969[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4970 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004971[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4972 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004973[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4974 access_sw_xmit_discards),
4975[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4976 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4977 access_sw_xmit_discards),
4978[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4979 access_xmit_constraint_errs),
4980[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4981 access_rcv_constraint_errs),
4982[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4983[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4984[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4985[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4986[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4987[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4988[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4989[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4990[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4991[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4992[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4993[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4994[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4995 access_sw_cpu_rc_acks),
4996[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4997 access_sw_cpu_rc_qacks),
4998[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4999 access_sw_cpu_rc_delayed_comp),
5000[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5001[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5002[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5003[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5004[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5005[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5006[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5007[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5008[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5009[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5010[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5011[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5012[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5013[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5014[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5015[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5016[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5017[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5018[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5019[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5020[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5021[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5022[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5023[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5024[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5025[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5026[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5027[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5028[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5029[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5030[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5031[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5032[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5033[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5034[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5035[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5036[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5037[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5038[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5039[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5040[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5041[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5042[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5043[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5044[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5045[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5046[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5047[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5048[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5049[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5050[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5051[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5052[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5053[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5054[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5055[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5056[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5057[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5058[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5059[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5060[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5061[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5062[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5063[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5064[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5065[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5066[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5067[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5068[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5069[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5070[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5071[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5072[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5073[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5074[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5075[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5076[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5077[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5078[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5079[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5080};
5081
5082/* ======================================================================== */
5083
Mike Marciniszyn77241052015-07-30 15:17:43 -04005084/* return true if this is chip revision a */
5085int is_ax(struct hfi1_devdata *dd)
5086{
5087 u8 chip_rev_minor =
5088 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5089 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5090 return (chip_rev_minor & 0xf0) == 0;
5091}
5092
5093/* return true if this is chip revision b */
5094int is_bx(struct hfi1_devdata *dd)
5095{
5096 u8 chip_rev_minor =
5097 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5098 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005099 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005100}
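/*
 * Editor's note, illustrative only (not part of the original source):
 * the A/B step checks above look only at the high nibble of the minor
 * revision.  A CCE_REVISION minor value of 0x03 gives (0x03 & 0xf0) == 0,
 * so is_ax() returns true; a minor value of 0x12 gives
 * (0x12 & 0xF0) == 0x10, so is_bx() returns true.  Any other high nibble
 * fails both checks.
 */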
5101
5102/*
5103 * Append string s to buffer buf. Arguments curp and len are the current
5104 * position and remaining length, respectively.
5105 *
5106 * return 0 on success, 1 on out of room
5107 */
5108static int append_str(char *buf, char **curp, int *lenp, const char *s)
5109{
5110 char *p = *curp;
5111 int len = *lenp;
5112 int result = 0; /* success */
5113 char c;
5114
5115	/* add a comma, if not first in the buffer */
5116 if (p != buf) {
5117 if (len == 0) {
5118 result = 1; /* out of room */
5119 goto done;
5120 }
5121 *p++ = ',';
5122 len--;
5123 }
5124
5125 /* copy the string */
5126 while ((c = *s++) != 0) {
5127 if (len == 0) {
5128 result = 1; /* out of room */
5129 goto done;
5130 }
5131 *p++ = c;
5132 len--;
5133 }
5134
5135done:
5136 /* write return values */
5137 *curp = p;
5138 *lenp = len;
5139
5140 return result;
5141}
5142
5143/*
5144 * Using the given flag table, print a comma separated string into
5145 * the buffer. End in '*' if the buffer is too short.
5146 */
5147static char *flag_string(char *buf, int buf_len, u64 flags,
5148 struct flag_table *table, int table_size)
5149{
5150 char extra[32];
5151 char *p = buf;
5152 int len = buf_len;
5153 int no_room = 0;
5154 int i;
5155
5156	/* make sure there are at least 2 bytes so we can form "*" */
5157 if (len < 2)
5158 return "";
5159
5160 len--; /* leave room for a nul */
5161 for (i = 0; i < table_size; i++) {
5162 if (flags & table[i].flag) {
5163 no_room = append_str(buf, &p, &len, table[i].str);
5164 if (no_room)
5165 break;
5166 flags &= ~table[i].flag;
5167 }
5168 }
5169
5170 /* any undocumented bits left? */
5171 if (!no_room && flags) {
5172 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5173 no_room = append_str(buf, &p, &len, extra);
5174 }
5175
5176	/* add '*' if we ran out of room */
5177 if (no_room) {
5178 /* may need to back up to add space for a '*' */
5179 if (len == 0)
5180 --p;
5181 *p++ = '*';
5182 }
5183
5184 /* add final nul - space already allocated above */
5185 *p = 0;
5186 return buf;
5187}
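/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): a typical caller hands flag_string() a small on-stack buffer
 * and one of the flag tables defined elsewhere in this file, e.g.
 *
 *	char buf[96];
 *	dd_dev_info(dd, "CCE Error: %s\n",
 *		    flag_string(buf, sizeof(buf), reg,
 *				cce_err_status_flags,
 *				ARRAY_SIZE(cce_err_status_flags)));
 *
 * Bits found in the table print as comma-separated names, any leftover
 * bits print as "bits 0x...", and a trailing '*' marks truncation.
 */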
5188
5189/* first 8 CCE error interrupt source names */
5190static const char * const cce_misc_names[] = {
5191 "CceErrInt", /* 0 */
5192 "RxeErrInt", /* 1 */
5193 "MiscErrInt", /* 2 */
5194 "Reserved3", /* 3 */
5195 "PioErrInt", /* 4 */
5196 "SDmaErrInt", /* 5 */
5197 "EgressErrInt", /* 6 */
5198 "TxeErrInt" /* 7 */
5199};
5200
5201/*
5202 * Return the miscellaneous error interrupt name.
5203 */
5204static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5205{
5206 if (source < ARRAY_SIZE(cce_misc_names))
5207 strncpy(buf, cce_misc_names[source], bsize);
5208 else
5209 snprintf(buf,
5210 bsize,
5211 "Reserved%u",
5212 source + IS_GENERAL_ERR_START);
5213
5214 return buf;
5215}
5216
5217/*
5218 * Return the SDMA engine error interrupt name.
5219 */
5220static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5221{
5222 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5223 return buf;
5224}
5225
5226/*
5227 * Return the send context error interrupt name.
5228 */
5229static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5230{
5231 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5232 return buf;
5233}
5234
5235static const char * const various_names[] = {
5236 "PbcInt",
5237 "GpioAssertInt",
5238 "Qsfp1Int",
5239 "Qsfp2Int",
5240 "TCritInt"
5241};
5242
5243/*
5244 * Return the various interrupt name.
5245 */
5246static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5247{
5248 if (source < ARRAY_SIZE(various_names))
5249 strncpy(buf, various_names[source], bsize);
5250 else
5251 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5252 return buf;
5253}
5254
5255/*
5256 * Return the DC interrupt name.
5257 */
5258static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5259{
5260 static const char * const dc_int_names[] = {
5261 "common",
5262 "lcb",
5263 "8051",
5264 "lbm" /* local block merge */
5265 };
5266
5267 if (source < ARRAY_SIZE(dc_int_names))
5268 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5269 else
5270 snprintf(buf, bsize, "DCInt%u", source);
5271 return buf;
5272}
5273
5274static const char * const sdma_int_names[] = {
5275 "SDmaInt",
5276 "SdmaIdleInt",
5277 "SdmaProgressInt",
5278};
5279
5280/*
5281 * Return the SDMA engine interrupt name.
5282 */
5283static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5284{
5285 /* what interrupt */
5286 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5287 /* which engine */
5288 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5289
5290 if (likely(what < 3))
5291 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5292 else
5293 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5294 return buf;
5295}
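/*
 * Worked example (editor's addition, assuming TXE_NUM_SDMA_ENGINES is 16):
 * source 19 decomposes to what = 19 / 16 = 1 and which = 19 % 16 = 3, so
 * the name printed is "SdmaIdleInt3".  Sources at or beyond 3 * 16 fall
 * outside the three known interrupt types and print as invalid.
 */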
5296
5297/*
5298 * Return the receive available interrupt name.
5299 */
5300static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5301{
5302 snprintf(buf, bsize, "RcvAvailInt%u", source);
5303 return buf;
5304}
5305
5306/*
5307 * Return the receive urgent interrupt name.
5308 */
5309static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5310{
5311 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5312 return buf;
5313}
5314
5315/*
5316 * Return the send credit interrupt name.
5317 */
5318static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5319{
5320 snprintf(buf, bsize, "SendCreditInt%u", source);
5321 return buf;
5322}
5323
5324/*
5325 * Return the reserved interrupt name.
5326 */
5327static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5328{
5329 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5330 return buf;
5331}
5332
5333static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5334{
5335 return flag_string(buf, buf_len, flags,
5336 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5337}
5338
5339static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5340{
5341 return flag_string(buf, buf_len, flags,
5342 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5343}
5344
5345static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5346{
5347 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5348 ARRAY_SIZE(misc_err_status_flags));
5349}
5350
5351static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5352{
5353 return flag_string(buf, buf_len, flags,
5354 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5355}
5356
5357static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5358{
5359 return flag_string(buf, buf_len, flags,
5360 sdma_err_status_flags,
5361 ARRAY_SIZE(sdma_err_status_flags));
5362}
5363
5364static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5365{
5366 return flag_string(buf, buf_len, flags,
5367 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5368}
5369
5370static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5371{
5372 return flag_string(buf, buf_len, flags,
5373 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5374}
5375
5376static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5377{
5378 return flag_string(buf, buf_len, flags,
5379 send_err_status_flags,
5380 ARRAY_SIZE(send_err_status_flags));
5381}
5382
5383static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5384{
5385 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005386 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005387
5388 /*
5389	 * For most of these errors, there is nothing that can be done except
5390 * report or record it.
5391 */
5392 dd_dev_info(dd, "CCE Error: %s\n",
5393 cce_err_status_string(buf, sizeof(buf), reg));
5394
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005395 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5396 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005397 /* this error requires a manual drop into SPC freeze mode */
5398 /* then a fix up */
5399 start_freeze_handling(dd->pport, FREEZE_SELF);
5400 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005401
5402 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5403 if (reg & (1ull << i)) {
5404 incr_cntr64(&dd->cce_err_status_cnt[i]);
5405 /* maintain a counter over all cce_err_status errors */
5406 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5407 }
5408 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005409}
5410
5411/*
5412 * Check counters for receive errors that do not have an interrupt
5413 * associated with them.
5414 */
5415#define RCVERR_CHECK_TIME 10
5416static void update_rcverr_timer(unsigned long opaque)
5417{
5418 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5419 struct hfi1_pportdata *ppd = dd->pport;
5420 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5421
5422 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5423 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5424 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5425 set_link_down_reason(ppd,
5426 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5427 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5428 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5429 }
5430 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5431
5432 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5433}
5434
5435static int init_rcverr(struct hfi1_devdata *dd)
5436{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305437 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005438 /* Assume the hardware counter has been reset */
5439 dd->rcv_ovfl_cnt = 0;
5440 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5441}
5442
5443static void free_rcverr(struct hfi1_devdata *dd)
5444{
5445 if (dd->rcverr_timer.data)
5446 del_timer_sync(&dd->rcverr_timer);
5447 dd->rcverr_timer.data = 0;
5448}
5449
5450static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5451{
5452 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005453 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005454
5455 dd_dev_info(dd, "Receive Error: %s\n",
5456 rxe_err_status_string(buf, sizeof(buf), reg));
5457
5458 if (reg & ALL_RXE_FREEZE_ERR) {
5459 int flags = 0;
5460
5461 /*
5462 * Freeze mode recovery is disabled for the errors
5463 * in RXE_FREEZE_ABORT_MASK
5464 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005465 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005466 flags = FREEZE_ABORT;
5467
5468 start_freeze_handling(dd->pport, flags);
5469 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005470
5471 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5472 if (reg & (1ull << i))
5473 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5474 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005475}
5476
5477static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5478{
5479 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005480 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005481
5482 dd_dev_info(dd, "Misc Error: %s",
5483 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005484 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5485 if (reg & (1ull << i))
5486 incr_cntr64(&dd->misc_err_status_cnt[i]);
5487 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005488}
5489
5490static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5491{
5492 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005493 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005494
5495 dd_dev_info(dd, "PIO Error: %s\n",
5496 pio_err_status_string(buf, sizeof(buf), reg));
5497
5498 if (reg & ALL_PIO_FREEZE_ERR)
5499 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005500
5501 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5502 if (reg & (1ull << i))
5503 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5504 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005505}
5506
5507static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5508{
5509 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005510 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005511
5512 dd_dev_info(dd, "SDMA Error: %s\n",
5513 sdma_err_status_string(buf, sizeof(buf), reg));
5514
5515 if (reg & ALL_SDMA_FREEZE_ERR)
5516 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005517
5518 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5519 if (reg & (1ull << i))
5520 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5521 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005522}
5523
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005524static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5525{
5526 incr_cntr64(&ppd->port_xmit_discards);
5527}
5528
Mike Marciniszyn77241052015-07-30 15:17:43 -04005529static void count_port_inactive(struct hfi1_devdata *dd)
5530{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005531 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005532}
5533
5534/*
5535 * We have had a "disallowed packet" error during egress. Determine the
5536 * integrity check which failed, and update relevant error counter, etc.
5537 *
5538 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5539 * bit of state per integrity check, and so we can miss the reason for an
5540 * egress error if more than one packet fails the same integrity check
5541 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5542 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005543static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5544 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005545{
5546 struct hfi1_pportdata *ppd = dd->pport;
5547 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5548 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5549 char buf[96];
5550
5551 /* clear down all observed info as quickly as possible after read */
5552 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5553
5554 dd_dev_info(dd,
5555 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5556 info, egress_err_info_string(buf, sizeof(buf), info), src);
5557
5558 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005559 if (info & PORT_DISCARD_EGRESS_ERRS) {
5560 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005561
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005562 /*
5563 * Count all, in case multiple bits are set. Reminder:
5564 * since there is only one info register for many sources,
5565 * these may be attributed to the wrong VL if they occur
5566 * too close together.
5567 */
5568 weight = hweight64(info);
5569 for (i = 0; i < weight; i++) {
5570 __count_port_discards(ppd);
5571 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5572 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5573 else if (vl == 15)
5574 incr_cntr64(&ppd->port_xmit_discards_vl
5575 [C_VL_15]);
5576 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005577 }
5578}
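/*
 * Illustrative example (editor's addition, not part of the original
 * source): if the info register reads back with three of the
 * PORT_DISCARD_EGRESS_ERRS bits set while vl == 2, hweight64() returns 3,
 * so port_xmit_discards and port_xmit_discards_vl[2] are each incremented
 * three times.  A vl of 15 is folded into the C_VL_15 slot instead.
 */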
5579
5580/*
5581 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5582 * register. Does it represent a 'port inactive' error?
5583 */
5584static inline int port_inactive_err(u64 posn)
5585{
5586 return (posn >= SEES(TX_LINKDOWN) &&
5587 posn <= SEES(TX_INCORRECT_LINK_STATE));
5588}
5589
5590/*
5591 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5592 * register. Does it represent a 'disallowed packet' error?
5593 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005594static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005595{
5596 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5597 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5598}
5599
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005600/*
5601 * Input value is a bit position of one of the SDMA engine disallowed
5602 * packet errors. Return which engine. Use of this must be guarded by
5603 * disallowed_pkt_err().
5604 */
5605static inline int disallowed_pkt_engine(int posn)
5606{
5607 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5608}
5609
5610/*
5611 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5612 * be done.
5613 */
5614static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5615{
5616 struct sdma_vl_map *m;
5617 int vl;
5618
5619 /* range check */
5620 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5621 return -1;
5622
5623 rcu_read_lock();
5624 m = rcu_dereference(dd->sdma_map);
5625 vl = m->engine_to_vl[engine];
5626 rcu_read_unlock();
5627
5628 return vl;
5629}
5630
5631/*
5632 * Translate the send context (software index) into a VL. Return -1 if the
5633 * translation cannot be done.
5634 */
5635static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5636{
5637 struct send_context_info *sci;
5638 struct send_context *sc;
5639 int i;
5640
5641 sci = &dd->send_contexts[sw_index];
5642
5643 /* there is no information for user (PSM) and ack contexts */
5644 if (sci->type != SC_KERNEL)
5645 return -1;
5646
5647 sc = sci->sc;
5648 if (!sc)
5649 return -1;
5650 if (dd->vld[15].sc == sc)
5651 return 15;
5652 for (i = 0; i < num_vls; i++)
5653 if (dd->vld[i].sc == sc)
5654 return i;
5655
5656 return -1;
5657}
5658
Mike Marciniszyn77241052015-07-30 15:17:43 -04005659static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5660{
5661 u64 reg_copy = reg, handled = 0;
5662 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005663 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005664
5665 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5666 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005667 else if (is_ax(dd) &&
5668 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5669 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005670 start_freeze_handling(dd->pport, 0);
5671
5672 while (reg_copy) {
5673 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005674 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005675 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005676 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005677
5678 if (port_inactive_err(shift)) {
5679 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005680 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005681 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005682 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5683
5684 handle_send_egress_err_info(dd, vl);
5685 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005686 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005687 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005688 }
5689
5690 reg &= ~handled;
5691
5692 if (reg)
5693 dd_dev_info(dd, "Egress Error: %s\n",
5694 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005695
5696 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5697 if (reg & (1ull << i))
5698 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5699 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005700}
5701
5702static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5703{
5704 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005705 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005706
5707 dd_dev_info(dd, "Send Error: %s\n",
5708 send_err_status_string(buf, sizeof(buf), reg));
5709
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005710 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5711 if (reg & (1ull << i))
5712 incr_cntr64(&dd->send_err_status_cnt[i]);
5713 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714}
5715
5716/*
5717 * The maximum number of times the error clear down will loop before
5718 * blocking a repeating error. This value is arbitrary.
5719 */
5720#define MAX_CLEAR_COUNT 20
5721
5722/*
5723 * Clear and handle an error register. All error interrupts are funneled
5724 * through here to have a central location to correctly handle single-
5725 * or multi-shot errors.
5726 *
5727 * For non per-context registers, call this routine with a context value
5728 * of 0 so the per-context offset is zero.
5729 *
5730 * If the handler loops too many times, assume that something is wrong
5731 * and can't be fixed, so mask the error bits.
5732 */
5733static void interrupt_clear_down(struct hfi1_devdata *dd,
5734 u32 context,
5735 const struct err_reg_info *eri)
5736{
5737 u64 reg;
5738 u32 count;
5739
5740 /* read in a loop until no more errors are seen */
5741 count = 0;
5742 while (1) {
5743 reg = read_kctxt_csr(dd, context, eri->status);
5744 if (reg == 0)
5745 break;
5746 write_kctxt_csr(dd, context, eri->clear, reg);
5747 if (likely(eri->handler))
5748 eri->handler(dd, context, reg);
5749 count++;
5750 if (count > MAX_CLEAR_COUNT) {
5751 u64 mask;
5752
5753 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5754 eri->desc, reg);
5755 /*
5756 * Read-modify-write so any other masked bits
5757 * remain masked.
5758 */
5759 mask = read_kctxt_csr(dd, context, eri->mask);
5760 mask &= ~reg;
5761 write_kctxt_csr(dd, context, eri->mask, mask);
5762 break;
5763 }
5764 }
5765}
5766
5767/*
5768 * CCE block "misc" interrupt. Source is < 16.
5769 */
5770static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5771{
5772 const struct err_reg_info *eri = &misc_errs[source];
5773
5774 if (eri->handler) {
5775 interrupt_clear_down(dd, 0, eri);
5776 } else {
5777 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5778 source);
5779 }
5780}
5781
5782static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5783{
5784 return flag_string(buf, buf_len, flags,
5785 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5786}
5787
5788/*
5789 * Send context error interrupt. Source (hw_context) is < 160.
5790 *
5791 * All send context errors cause the send context to halt. The normal
5792 * clear-down mechanism cannot be used because we cannot clear the
5793 * error bits until several other long-running items are done first.
5794 * This is OK because with the context halted, nothing else is going
5795 * to happen on it anyway.
5796 */
5797static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5798 unsigned int hw_context)
5799{
5800 struct send_context_info *sci;
5801 struct send_context *sc;
5802 char flags[96];
5803 u64 status;
5804 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005805 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005806
5807 sw_index = dd->hw_to_sw[hw_context];
5808 if (sw_index >= dd->num_send_contexts) {
5809 dd_dev_err(dd,
5810 "out of range sw index %u for send context %u\n",
5811 sw_index, hw_context);
5812 return;
5813 }
5814 sci = &dd->send_contexts[sw_index];
5815 sc = sci->sc;
5816 if (!sc) {
5817 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5818 sw_index, hw_context);
5819 return;
5820 }
5821
5822 /* tell the software that a halt has begun */
5823 sc_stop(sc, SCF_HALTED);
5824
5825 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5826
5827 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5828 send_context_err_status_string(flags, sizeof(flags), status));
5829
5830 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005831 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005832
5833 /*
5834 * Automatically restart halted kernel contexts out of interrupt
5835 * context. User contexts must ask the driver to restart the context.
5836 */
5837 if (sc->type != SC_USER)
5838 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005839
5840 /*
5841 * Update the counters for the corresponding status bits.
5842 * Note that these particular counters are aggregated over all
5843 * 160 contexts.
5844 */
5845 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5846 if (status & (1ull << i))
5847 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5848 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005849}
5850
5851static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5852 unsigned int source, u64 status)
5853{
5854 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005855 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005856
5857 sde = &dd->per_sdma[source];
5858#ifdef CONFIG_SDMA_VERBOSITY
5859 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5860 slashstrip(__FILE__), __LINE__, __func__);
5861 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5862 sde->this_idx, source, (unsigned long long)status);
5863#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005864 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005865 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005866
5867 /*
5868 * Update the counters for the corresponding status bits.
5869 * Note that these particular counters are aggregated over
5870 * all 16 DMA engines.
5871 */
5872 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5873 if (status & (1ull << i))
5874 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5875 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005876}
5877
5878/*
5879 * CCE block SDMA error interrupt. Source is < 16.
5880 */
5881static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5882{
5883#ifdef CONFIG_SDMA_VERBOSITY
5884 struct sdma_engine *sde = &dd->per_sdma[source];
5885
5886 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5887 slashstrip(__FILE__), __LINE__, __func__);
5888 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5889 source);
5890 sdma_dumpstate(sde);
5891#endif
5892 interrupt_clear_down(dd, source, &sdma_eng_err);
5893}
5894
5895/*
5896 * CCE block "various" interrupt. Source is < 8.
5897 */
5898static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5899{
5900 const struct err_reg_info *eri = &various_err[source];
5901
5902 /*
5903 * TCritInt cannot go through interrupt_clear_down()
5904 * because it is not a second tier interrupt. The handler
5905 * should be called directly.
5906 */
5907 if (source == TCRIT_INT_SOURCE)
5908 handle_temp_err(dd);
5909 else if (eri->handler)
5910 interrupt_clear_down(dd, 0, eri);
5911 else
5912 dd_dev_info(dd,
5913 "%s: Unimplemented/reserved interrupt %d\n",
5914 __func__, source);
5915}
5916
5917static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5918{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005919 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005920 struct hfi1_pportdata *ppd = dd->pport;
5921 unsigned long flags;
5922 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5923
5924 if (reg & QSFP_HFI0_MODPRST_N) {
5925
5926 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5927 __func__);
5928
5929 if (!qsfp_mod_present(ppd)) {
5930 ppd->driver_link_ready = 0;
5931 /*
5932 * Cable removed, reset all our information about the
5933 * cache and cable capabilities
5934 */
5935
5936 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5937 /*
5938 * We don't set cache_refresh_required here as we expect
5939 * an interrupt when a cable is inserted
5940 */
5941 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005942 ppd->qsfp_info.reset_needed = 0;
5943 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005944 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5945 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005946 /* Invert the ModPresent pin now to detect plug-in */
5947 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5948 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005949
5950 if ((ppd->offline_disabled_reason >
5951 HFI1_ODR_MASK(
5952 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
5953 (ppd->offline_disabled_reason ==
5954 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5955 ppd->offline_disabled_reason =
5956 HFI1_ODR_MASK(
5957 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);
5958
Mike Marciniszyn77241052015-07-30 15:17:43 -04005959 if (ppd->host_link_state == HLS_DN_POLL) {
5960 /*
5961 * The link is still in POLL. This means
5962 * that the normal link down processing
5963 * will not happen. We have to do it here
5964 * before turning the DC off.
5965 */
5966 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5967 }
5968 } else {
5969 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5970 ppd->qsfp_info.cache_valid = 0;
5971 ppd->qsfp_info.cache_refresh_required = 1;
5972 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5973 flags);
5974
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005975 /*
5976 * Stop inversion of ModPresent pin to detect
5977 * removal of the cable
5978 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005979 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005980 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5981 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5982
5983 ppd->offline_disabled_reason =
5984 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005985 }
5986 }
5987
5988 if (reg & QSFP_HFI0_INT_N) {
5989
5990 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5991 __func__);
5992 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5993 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005994 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5995 }
5996
5997 /* Schedule the QSFP work only if there is a cable attached. */
5998 if (qsfp_mod_present(ppd))
5999 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6000}
6001
6002static int request_host_lcb_access(struct hfi1_devdata *dd)
6003{
6004 int ret;
6005
6006 ret = do_8051_command(dd, HCMD_MISC,
6007 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6008 NULL);
6009 if (ret != HCMD_SUCCESS) {
6010 dd_dev_err(dd, "%s: command failed with error %d\n",
6011 __func__, ret);
6012 }
6013 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6014}
6015
6016static int request_8051_lcb_access(struct hfi1_devdata *dd)
6017{
6018 int ret;
6019
6020 ret = do_8051_command(dd, HCMD_MISC,
6021 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6022 NULL);
6023 if (ret != HCMD_SUCCESS) {
6024 dd_dev_err(dd, "%s: command failed with error %d\n",
6025 __func__, ret);
6026 }
6027 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6028}
6029
6030/*
6031 * Set the LCB selector - allow host access. The DCC selector always
6032 * points to the host.
6033 */
6034static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6035{
6036 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6037 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
6038 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6039}
6040
6041/*
6042 * Clear the LCB selector - allow 8051 access. The DCC selector always
6043 * points to the host.
6044 */
6045static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6046{
6047 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6048 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6049}
6050
6051/*
6052 * Acquire LCB access from the 8051. If the host already has access,
6053 * just increment a counter. Otherwise, inform the 8051 that the
6054 * host is taking access.
6055 *
6056 * Returns:
6057 * 0 on success
6058 * -EBUSY if the 8051 has control and cannot be disturbed
6059 * -errno if unable to acquire access from the 8051
6060 */
6061int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6062{
6063 struct hfi1_pportdata *ppd = dd->pport;
6064 int ret = 0;
6065
6066 /*
6067 * Use the host link state lock so the operation of this routine
6068 * { link state check, selector change, count increment } can occur
6069 * as a unit against a link state change. Otherwise there is a
6070 * race between the state change and the count increment.
6071 */
6072 if (sleep_ok) {
6073 mutex_lock(&ppd->hls_lock);
6074 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006075 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006076 udelay(1);
6077 }
6078
6079 /* this access is valid only when the link is up */
6080 if ((ppd->host_link_state & HLS_UP) == 0) {
6081 dd_dev_info(dd, "%s: link state %s not up\n",
6082 __func__, link_state_name(ppd->host_link_state));
6083 ret = -EBUSY;
6084 goto done;
6085 }
6086
6087 if (dd->lcb_access_count == 0) {
6088 ret = request_host_lcb_access(dd);
6089 if (ret) {
6090 dd_dev_err(dd,
6091 "%s: unable to acquire LCB access, err %d\n",
6092 __func__, ret);
6093 goto done;
6094 }
6095 set_host_lcb_access(dd);
6096 }
6097 dd->lcb_access_count++;
6098done:
6099 mutex_unlock(&ppd->hls_lock);
6100 return ret;
6101}
6102
6103/*
6104 * Release LCB access by decrementing the use count. If the count is moving
6105 * from 1 to 0, inform the 8051 that it has control back.
6106 *
6107 * Returns:
6108 * 0 on success
6109 * -errno if unable to release access to the 8051
6110 */
6111int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6112{
6113 int ret = 0;
6114
6115 /*
6116 * Use the host link state lock because the acquire needed it.
6117 * Here, we only need to keep { selector change, count decrement }
6118 * as a unit.
6119 */
6120 if (sleep_ok) {
6121 mutex_lock(&dd->pport->hls_lock);
6122 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006123 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006124 udelay(1);
6125 }
6126
6127 if (dd->lcb_access_count == 0) {
6128 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6129 __func__);
6130 goto done;
6131 }
6132
6133 if (dd->lcb_access_count == 1) {
6134 set_8051_lcb_access(dd);
6135 ret = request_8051_lcb_access(dd);
6136 if (ret) {
6137 dd_dev_err(dd,
6138 "%s: unable to release LCB access, err %d\n",
6139 __func__, ret);
6140 /* restore host access if the grant didn't work */
6141 set_host_lcb_access(dd);
6142 goto done;
6143 }
6144 }
6145 dd->lcb_access_count--;
6146done:
6147 mutex_unlock(&dd->pport->hls_lock);
6148 return ret;
6149}
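/*
 * Illustrative usage sketch (editor's addition; the register name below is
 * only an example, not taken from this file): callers bracket direct LCB
 * CSR access with the acquire/release pair, e.g.
 *
 *	if (acquire_lcb_access(dd, sleep_ok) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, sleep_ok);
 *	}
 *
 * Any LCB register read or write follows the same pattern.
 */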
6150
6151/*
6152 * Initialize LCB access variables and state. Called during driver load,
6153 * after most of the initialization is finished.
6154 *
6155 * The DC default is LCB access on for the host. The driver defaults to
6156 * leaving access to the 8051. Assign access now - this constrains the call
6157 * to this routine to be after all LCB set-up is done. In particular, after
6158 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6159 */
6160static void init_lcb_access(struct hfi1_devdata *dd)
6161{
6162 dd->lcb_access_count = 0;
6163}
6164
6165/*
6166 * Write a response back to a 8051 request.
6167 */
6168static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6169{
6170 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6171 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6172 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6173 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6174}
6175
6176/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006177 * Handle host requests from the 8051.
6178 *
6179 * This is a work-queue function outside of the interrupt.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006180 */
Easwar Hariharancbac3862016-02-03 14:31:31 -08006181void handle_8051_request(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006182{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006183 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6184 dc_host_req_work);
6185 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006186 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006187 u16 data = 0;
6188 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6189 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
Mike Marciniszyn77241052015-07-30 15:17:43 -04006190
6191 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6192 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6193 return; /* no request */
6194
6195 /* zero out COMPLETED so the response is seen */
6196 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6197
6198 /* extract request details */
6199 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6200 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6201 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6202 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6203
6204 switch (type) {
6205 case HREQ_LOAD_CONFIG:
6206 case HREQ_SAVE_CONFIG:
6207 case HREQ_READ_CONFIG:
6208 case HREQ_SET_TX_EQ_ABS:
6209 case HREQ_SET_TX_EQ_REL:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006210 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6211 type);
6212 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6213 break;
6214
Easwar Hariharancbac3862016-02-03 14:31:31 -08006215 case HREQ_ENABLE:
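		/*
		 * Request data: bits [3:0] select the lanes to act on,
		 * bit 9 (0x200) asks for TX CDR on and bit 11 (0x800) for
		 * RX CDR on.  The QSFP CDR control byte packs per-lane TX
		 * enables in bits [7:4] and RX enables in bits [3:0]; a
		 * lane's bit is only changed when the corresponding bits
		 * are set in the cached QSFP_MOD_PWR and QSFP_CDR_INFO
		 * bytes.
		 */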
6216 lanes = data & 0xF;
6217 for (i = 0; lanes; lanes >>= 1, i++) {
6218 if (!(lanes & 1))
6219 continue;
6220 if (data & 0x200) {
6221 /* enable TX CDR */
6222 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6223 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6224 cdr_ctrl_byte |= (1 << (i + 4));
6225 } else {
6226 /* disable TX CDR */
6227 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6228 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6229 cdr_ctrl_byte &= ~(1 << (i + 4));
6230 }
6231
6232 if (data & 0x800) {
6233 /* enable RX CDR */
6234 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6235 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6236 cdr_ctrl_byte |= (1 << i);
6237 } else {
6238 /* disable RX CDR */
6239 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6240 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6241 cdr_ctrl_byte &= ~(1 << i);
6242 }
6243 }
6244 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6245 &cdr_ctrl_byte, 1);
6246 hreq_response(dd, HREQ_SUCCESS, data);
6247 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6248 break;
6249
Mike Marciniszyn77241052015-07-30 15:17:43 -04006250 case HREQ_CONFIG_DONE:
6251 hreq_response(dd, HREQ_SUCCESS, 0);
6252 break;
6253
6254 case HREQ_INTERFACE_TEST:
6255 hreq_response(dd, HREQ_SUCCESS, data);
6256 break;
6257
6258 default:
6259 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6260 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6261 break;
6262 }
6263}
6264
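/*
 * Program SEND_CM_GLOBAL_CREDIT with the total and shared credit limits
 * plus the allocation unit size (vAU encoding).
 */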
6265static void write_global_credit(struct hfi1_devdata *dd,
6266 u8 vau, u16 total, u16 shared)
6267{
6268 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6269 ((u64)total
6270 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6271 | ((u64)shared
6272 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6273 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6274}
6275
6276/*
6277 * Set up initial VL15 credits of the remote. Assumes the rest of
 6278 * the CM credit registers are zero from a previous global or credit reset.
6279 */
6280void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6281{
6282 /* leave shared count at zero for both global and VL15 */
6283 write_global_credit(dd, vau, vl15buf, 0);
6284
6285 /* We may need some credits for another VL when sending packets
6286 * with the snoop interface. Dividing it down the middle for VL15
6287 * and VL0 should suffice.
6288 */
6289 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6290 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6291 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6292 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6293 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6294 } else {
6295 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6296 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6297 }
6298}
6299
6300/*
6301 * Zero all credit details from the previous connection and
6302 * reset the CM manager's internal counters.
6303 */
6304void reset_link_credits(struct hfi1_devdata *dd)
6305{
6306 int i;
6307
6308 /* remove all previous VL credit limits */
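	/* the per-VL dedicated credit CSRs are spaced 8 bytes apart */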
6309 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6310 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6311 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6312 write_global_credit(dd, 0, 0, 0);
6313 /* reset the CM block */
6314 pio_send_control(dd, PSC_CM_RESET);
6315}
6316
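/*
 * vAU and vCU are log2 encodings: a value of n means 2^n units, with an
 * allocation unit (AU) expressed in bytes as 8 * 2^vAU.
 */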
6317/* convert a vCU to a CU */
6318static u32 vcu_to_cu(u8 vcu)
6319{
6320 return 1 << vcu;
6321}
6322
6323/* convert a CU to a vCU */
6324static u8 cu_to_vcu(u32 cu)
6325{
6326 return ilog2(cu);
6327}
6328
6329/* convert a vAU to an AU */
6330static u32 vau_to_au(u8 vau)
6331{
6332 return 8 * (1 << vau);
6333}
6334
6335static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6336{
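	/* default management QPs: 0 for SM traps, 1 for the SA */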
6337 ppd->sm_trap_qp = 0x0;
6338 ppd->sa_qp = 0x1;
6339}
6340
6341/*
6342 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6343 */
6344static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6345{
6346 u64 reg;
6347
6348 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6349 write_csr(dd, DC_LCB_CFG_RUN, 0);
6350 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6351 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6352 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6353 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6354 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6355 reg = read_csr(dd, DCC_CFG_RESET);
6356 write_csr(dd, DCC_CFG_RESET,
6357 reg
6358 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6359 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6360 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6361 if (!abort) {
6362 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6363 write_csr(dd, DCC_CFG_RESET, reg);
6364 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6365 }
6366}
6367
6368/*
6369 * This routine should be called after the link has been transitioned to
6370 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6371 * reset).
6372 *
6373 * The expectation is that the caller of this routine would have taken
6374 * care of properly transitioning the link into the correct state.
6375 */
6376static void dc_shutdown(struct hfi1_devdata *dd)
6377{
6378 unsigned long flags;
6379
6380 spin_lock_irqsave(&dd->dc8051_lock, flags);
6381 if (dd->dc_shutdown) {
6382 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6383 return;
6384 }
6385 dd->dc_shutdown = 1;
6386 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6387 /* Shutdown the LCB */
6388 lcb_shutdown(dd, 1);
 6389	/* Going to OFFLINE would have caused the 8051 to put the
 6390	 * SerDes into reset already. Just need to shut down the 8051
 6391	 * itself. */
6392 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6393}
6394
6395/* Calling this after the DC has been brought out of reset should not
6396 * do any damage. */
6397static void dc_start(struct hfi1_devdata *dd)
6398{
6399 unsigned long flags;
6400 int ret;
6401
6402 spin_lock_irqsave(&dd->dc8051_lock, flags);
6403 if (!dd->dc_shutdown)
6404 goto done;
6405 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6406 /* Take the 8051 out of reset */
6407 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6408 /* Wait until 8051 is ready */
6409 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6410 if (ret) {
6411 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6412 __func__);
6413 }
6414 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6415 write_csr(dd, DCC_CFG_RESET, 0x10);
6416 /* lcb_shutdown() with abort=1 does not restore these */
6417 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6418 spin_lock_irqsave(&dd->dc8051_lock, flags);
6419 dd->dc_shutdown = 0;
6420done:
6421 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6422}
6423
6424/*
6425 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6426 */
6427static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6428{
6429 u64 rx_radr, tx_radr;
6430 u32 version;
6431
6432 if (dd->icode != ICODE_FPGA_EMULATION)
6433 return;
6434
6435 /*
6436 * These LCB defaults on emulator _s are good, nothing to do here:
6437 * LCB_CFG_TX_FIFOS_RADR
6438 * LCB_CFG_RX_FIFOS_RADR
6439 * LCB_CFG_LN_DCLK
6440 * LCB_CFG_IGNORE_LOST_RCLK
6441 */
6442 if (is_emulator_s(dd))
6443 return;
6444 /* else this is _p */
6445
6446 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006447 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006448 version = 0x2d; /* all B0 use 0x2d or higher settings */
6449
6450 if (version <= 0x12) {
6451 /* release 0x12 and below */
6452
6453 /*
6454 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6455 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6456 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6457 */
6458 rx_radr =
6459 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6460 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6461 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6462 /*
6463 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6464 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6465 */
6466 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6467 } else if (version <= 0x18) {
6468 /* release 0x13 up to 0x18 */
6469 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6470 rx_radr =
6471 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6475 } else if (version == 0x19) {
6476 /* release 0x19 */
6477 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6478 rx_radr =
6479 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6480 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6481 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6482 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6483 } else if (version == 0x1a) {
6484 /* release 0x1a */
6485 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6486 rx_radr =
6487 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6488 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6489 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6490 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6491 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6492 } else {
6493 /* release 0x1b and higher */
6494 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6495 rx_radr =
6496 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6497 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6498 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6499 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6500 }
6501
6502 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6503 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6504 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6505 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6506 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6507}
6508
6509/*
6510 * Handle a SMA idle message
6511 *
6512 * This is a work-queue function outside of the interrupt.
6513 */
6514void handle_sma_message(struct work_struct *work)
6515{
6516 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6517 sma_message_work);
6518 struct hfi1_devdata *dd = ppd->dd;
6519 u64 msg;
6520 int ret;
6521
6522 /* msg is bytes 1-4 of the 40-bit idle message - the command code
6523 is stripped off */
6524 ret = read_idle_sma(dd, &msg);
6525 if (ret)
6526 return;
6527 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6528 /*
6529 * React to the SMA message. Byte[1] (0 for us) is the command.
6530 */
6531 switch (msg & 0xff) {
6532 case SMA_IDLE_ARM:
6533 /*
6534 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6535 * State Transitions
6536 *
6537 * Only expected in INIT or ARMED, discard otherwise.
6538 */
6539 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6540 ppd->neighbor_normal = 1;
6541 break;
6542 case SMA_IDLE_ACTIVE:
6543 /*
6544 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6545 * State Transitions
6546 *
6547 * Can activate the node. Discard otherwise.
6548 */
6549 if (ppd->host_link_state == HLS_UP_ARMED
6550 && ppd->is_active_optimize_enabled) {
6551 ppd->neighbor_normal = 1;
6552 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6553 if (ret)
6554 dd_dev_err(
6555 dd,
6556 "%s: received Active SMA idle message, couldn't set link to Active\n",
6557 __func__);
6558 }
6559 break;
6560 default:
6561 dd_dev_err(dd,
6562 "%s: received unexpected SMA idle message 0x%llx\n",
6563 __func__, msg);
6564 break;
6565 }
6566}
6567
6568static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6569{
6570 u64 rcvctrl;
6571 unsigned long flags;
6572
6573 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6574 rcvctrl = read_csr(dd, RCV_CTRL);
6575 rcvctrl |= add;
6576 rcvctrl &= ~clear;
6577 write_csr(dd, RCV_CTRL, rcvctrl);
6578 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6579}
6580
6581static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6582{
6583 adjust_rcvctrl(dd, add, 0);
6584}
6585
6586static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6587{
6588 adjust_rcvctrl(dd, 0, clear);
6589}
6590
6591/*
6592 * Called from all interrupt handlers to start handling an SPC freeze.
6593 */
6594void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6595{
6596 struct hfi1_devdata *dd = ppd->dd;
6597 struct send_context *sc;
6598 int i;
6599
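	/* FREEZE_SELF: this HFI initiates the SPC freeze itself via CCE_CTRL */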
6600 if (flags & FREEZE_SELF)
6601 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6602
6603 /* enter frozen mode */
6604 dd->flags |= HFI1_FROZEN;
6605
6606 /* notify all SDMA engines that they are going into a freeze */
6607 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6608
6609 /* do halt pre-handling on all enabled send contexts */
6610 for (i = 0; i < dd->num_send_contexts; i++) {
6611 sc = dd->send_contexts[i].sc;
6612 if (sc && (sc->flags & SCF_ENABLED))
6613 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6614 }
6615
 6616	/* Send contexts are frozen. Notify user space */
6617 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6618
6619 if (flags & FREEZE_ABORT) {
6620 dd_dev_err(dd,
6621 "Aborted freeze recovery. Please REBOOT system\n");
6622 return;
6623 }
6624 /* queue non-interrupt handler */
6625 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6626}
6627
6628/*
6629 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6630 * depending on the "freeze" parameter.
6631 *
6632 * No need to return an error if it times out, our only option
6633 * is to proceed anyway.
6634 */
6635static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6636{
6637 unsigned long timeout;
6638 u64 reg;
6639
6640 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6641 while (1) {
6642 reg = read_csr(dd, CCE_STATUS);
6643 if (freeze) {
6644 /* waiting until all indicators are set */
6645 if ((reg & ALL_FROZE) == ALL_FROZE)
6646 return; /* all done */
6647 } else {
6648 /* waiting until all indicators are clear */
6649 if ((reg & ALL_FROZE) == 0)
6650 return; /* all done */
6651 }
6652
6653 if (time_after(jiffies, timeout)) {
6654 dd_dev_err(dd,
6655 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6656 freeze ? "" : "un",
6657 reg & ALL_FROZE,
6658 freeze ? ALL_FROZE : 0ull);
6659 return;
6660 }
6661 usleep_range(80, 120);
6662 }
6663}
6664
6665/*
6666 * Do all freeze handling for the RXE block.
6667 */
6668static void rxe_freeze(struct hfi1_devdata *dd)
6669{
6670 int i;
6671
6672 /* disable port */
6673 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6674
6675 /* disable all receive contexts */
6676 for (i = 0; i < dd->num_rcv_contexts; i++)
6677 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6678}
6679
6680/*
6681 * Unfreeze handling for the RXE block - kernel contexts only.
6682 * This will also enable the port. User contexts will do unfreeze
6683 * handling on a per-context basis as they call into the driver.
6684 *
6685 */
6686static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6687{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006688 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006689 int i;
6690
6691 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006692 for (i = 0; i < dd->n_krcv_queues; i++) {
6693 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6694 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6695 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6696 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6697 hfi1_rcvctrl(dd, rcvmask, i);
6698 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006699
6700 /* enable port */
6701 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6702}
6703
6704/*
6705 * Non-interrupt SPC freeze handling.
6706 *
6707 * This is a work-queue function outside of the triggering interrupt.
6708 */
6709void handle_freeze(struct work_struct *work)
6710{
6711 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6712 freeze_work);
6713 struct hfi1_devdata *dd = ppd->dd;
6714
6715 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006716 wait_for_freeze_status(dd, 1);
6717
6718 /* SPC is now frozen */
6719
6720 /* do send PIO freeze steps */
6721 pio_freeze(dd);
6722
6723 /* do send DMA freeze steps */
6724 sdma_freeze(dd);
6725
6726 /* do send egress freeze steps - nothing to do */
6727
6728 /* do receive freeze steps */
6729 rxe_freeze(dd);
6730
6731 /*
6732 * Unfreeze the hardware - clear the freeze, wait for each
6733 * block's frozen bit to clear, then clear the frozen flag.
6734 */
6735 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6736 wait_for_freeze_status(dd, 0);
6737
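	/* A-step silicon gets an extra freeze/unfreeze cycle */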
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006738 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006739 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6740 wait_for_freeze_status(dd, 1);
6741 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6742 wait_for_freeze_status(dd, 0);
6743 }
6744
6745 /* do send PIO unfreeze steps for kernel contexts */
6746 pio_kernel_unfreeze(dd);
6747
6748 /* do send DMA unfreeze steps */
6749 sdma_unfreeze(dd);
6750
6751 /* do send egress unfreeze steps - nothing to do */
6752
6753 /* do receive unfreeze steps for kernel contexts */
6754 rxe_kernel_unfreeze(dd);
6755
6756 /*
6757 * The unfreeze procedure touches global device registers when
6758 * it disables and re-enables RXE. Mark the device unfrozen
6759 * after all that is done so other parts of the driver waiting
6760 * for the device to unfreeze don't do things out of order.
6761 *
6762 * The above implies that the meaning of HFI1_FROZEN flag is
6763 * "Device has gone into freeze mode and freeze mode handling
6764 * is still in progress."
6765 *
6766 * The flag will be removed when freeze mode processing has
6767 * completed.
6768 */
6769 dd->flags &= ~HFI1_FROZEN;
6770 wake_up(&dd->event_queue);
6771
6772 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006773}
6774
6775/*
6776 * Handle a link up interrupt from the 8051.
6777 *
6778 * This is a work-queue function outside of the interrupt.
6779 */
6780void handle_link_up(struct work_struct *work)
6781{
6782 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6783 link_up_work);
6784 set_link_state(ppd, HLS_UP_INIT);
6785
6786 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6787 read_ltp_rtt(ppd->dd);
6788 /*
6789 * OPA specifies that certain counters are cleared on a transition
6790 * to link up, so do that.
6791 */
6792 clear_linkup_counters(ppd->dd);
6793 /*
6794 * And (re)set link up default values.
6795 */
6796 set_linkup_defaults(ppd);
6797
6798 /* enforce link speed enabled */
6799 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6800 /* oops - current speed is not enabled, bounce */
6801 dd_dev_err(ppd->dd,
6802 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6803 ppd->link_speed_active, ppd->link_speed_enabled);
6804 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6805 OPA_LINKDOWN_REASON_SPEED_POLICY);
6806 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006807 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006808 start_link(ppd);
6809 }
6810}
6811
6812/* Several pieces of LNI information were cached for SMA in ppd.
6813 * Reset these on link down */
6814static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6815{
6816 ppd->neighbor_guid = 0;
6817 ppd->neighbor_port_number = 0;
6818 ppd->neighbor_type = 0;
6819 ppd->neighbor_fm_security = 0;
6820}
6821
6822/*
6823 * Handle a link down interrupt from the 8051.
6824 *
6825 * This is a work-queue function outside of the interrupt.
6826 */
6827void handle_link_down(struct work_struct *work)
6828{
6829 u8 lcl_reason, neigh_reason = 0;
6830 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6831 link_down_work);
6832
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006833 if ((ppd->host_link_state &
6834 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6835 ppd->port_type == PORT_TYPE_FIXED)
6836 ppd->offline_disabled_reason =
6837 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6838
6839 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006840 set_link_state(ppd, HLS_DN_OFFLINE);
6841
6842 lcl_reason = 0;
6843 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6844
6845 /*
6846 * If no reason, assume peer-initiated but missed
6847 * LinkGoingDown idle flits.
6848 */
6849 if (neigh_reason == 0)
6850 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6851
6852 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6853
6854 reset_neighbor_info(ppd);
6855
6856 /* disable the port */
6857 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6858
6859 /* If there is no cable attached, turn the DC off. Otherwise,
 6860	 * start the link bring-up. */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006861 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006862 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006863 } else {
6864 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006865 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006866 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006867}
6868
6869void handle_link_bounce(struct work_struct *work)
6870{
6871 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6872 link_bounce_work);
6873
6874 /*
6875 * Only do something if the link is currently up.
6876 */
6877 if (ppd->host_link_state & HLS_UP) {
6878 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006879 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006880 start_link(ppd);
6881 } else {
6882 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6883 __func__, link_state_name(ppd->host_link_state));
6884 }
6885}
6886
6887/*
6888 * Mask conversion: Capability exchange to Port LTP. The capability
6889 * exchange has an implicit 16b CRC that is mandatory.
6890 */
6891static int cap_to_port_ltp(int cap)
6892{
6893 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6894
6895 if (cap & CAP_CRC_14B)
6896 port_ltp |= PORT_LTP_CRC_MODE_14;
6897 if (cap & CAP_CRC_48B)
6898 port_ltp |= PORT_LTP_CRC_MODE_48;
6899 if (cap & CAP_CRC_12B_16B_PER_LANE)
6900 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6901
6902 return port_ltp;
6903}
6904
6905/*
6906 * Convert an OPA Port LTP mask to capability mask
6907 */
6908int port_ltp_to_cap(int port_ltp)
6909{
6910 int cap_mask = 0;
6911
6912 if (port_ltp & PORT_LTP_CRC_MODE_14)
6913 cap_mask |= CAP_CRC_14B;
6914 if (port_ltp & PORT_LTP_CRC_MODE_48)
6915 cap_mask |= CAP_CRC_48B;
6916 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6917 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6918
6919 return cap_mask;
6920}
6921
6922/*
6923 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6924 */
6925static int lcb_to_port_ltp(int lcb_crc)
6926{
6927 int port_ltp = 0;
6928
6929 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6930 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6931 else if (lcb_crc == LCB_CRC_48B)
6932 port_ltp = PORT_LTP_CRC_MODE_48;
6933 else if (lcb_crc == LCB_CRC_14B)
6934 port_ltp = PORT_LTP_CRC_MODE_14;
6935 else
6936 port_ltp = PORT_LTP_CRC_MODE_16;
6937
6938 return port_ltp;
6939}
6940
6941/*
6942 * Our neighbor has indicated that we are allowed to act as a fabric
 6943 * manager, so place the full management partition key in pkey array
 6944 * index 2 (see OPAv1, section 20.2.2.6.8). Note
6945 * that we should already have the limited management partition key in
6946 * array element 1, and also that the port is not yet up when
6947 * add_full_mgmt_pkey() is invoked.
6948 */
6949static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6950{
6951 struct hfi1_devdata *dd = ppd->dd;
6952
Dean Luick87645222015-12-01 15:38:21 -05006953	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6954 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6955 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6956 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006957 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6958 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6959}
6960
6961/*
6962 * Convert the given link width to the OPA link width bitmask.
6963 */
6964static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6965{
6966 switch (width) {
6967 case 0:
6968 /*
6969 * Simulator and quick linkup do not set the width.
6970 * Just set it to 4x without complaint.
6971 */
6972 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6973 return OPA_LINK_WIDTH_4X;
6974 return 0; /* no lanes up */
6975 case 1: return OPA_LINK_WIDTH_1X;
6976 case 2: return OPA_LINK_WIDTH_2X;
6977 case 3: return OPA_LINK_WIDTH_3X;
6978 default:
6979 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6980 __func__, width);
6981 /* fall through */
6982 case 4: return OPA_LINK_WIDTH_4X;
6983 }
6984}
6985
6986/*
6987 * Do a population count on the bottom nibble.
6988 */
6989static const u8 bit_counts[16] = {
6990 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6991};
6992static inline u8 nibble_to_count(u8 nibble)
6993{
6994 return bit_counts[nibble & 0xf];
6995}
6996
6997/*
6998 * Read the active lane information from the 8051 registers and return
6999 * their widths.
7000 *
7001 * Active lane information is found in these 8051 registers:
7002 * enable_lane_tx
7003 * enable_lane_rx
7004 */
7005static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7006 u16 *rx_width)
7007{
7008 u16 tx, rx;
7009 u8 enable_lane_rx;
7010 u8 enable_lane_tx;
7011 u8 tx_polarity_inversion;
7012 u8 rx_polarity_inversion;
7013 u8 max_rate;
7014
7015 /* read the active lanes */
7016 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7017 &rx_polarity_inversion, &max_rate);
7018 read_local_lni(dd, &enable_lane_rx);
7019
7020 /* convert to counts */
7021 tx = nibble_to_count(enable_lane_tx);
7022 rx = nibble_to_count(enable_lane_rx);
7023
7024 /*
7025 * Set link_speed_active here, overriding what was set in
7026 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7027 * set the max_rate field in handle_verify_cap until v0.19.
7028 */
7029 if ((dd->icode == ICODE_RTL_SILICON)
7030 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
7031 /* max_rate: 0 = 12.5G, 1 = 25G */
7032 switch (max_rate) {
7033 case 0:
7034 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7035 break;
7036 default:
7037 dd_dev_err(dd,
7038 "%s: unexpected max rate %d, using 25Gb\n",
7039 __func__, (int)max_rate);
7040 /* fall through */
7041 case 1:
7042 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7043 break;
7044 }
7045 }
7046
7047 dd_dev_info(dd,
7048 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7049 enable_lane_tx, tx, enable_lane_rx, rx);
7050 *tx_width = link_width_to_bits(dd, tx);
7051 *rx_width = link_width_to_bits(dd, rx);
7052}
7053
7054/*
7055 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7056 * Valid after the end of VerifyCap and during LinkUp. Does not change
7057 * after link up. I.e. look elsewhere for downgrade information.
7058 *
7059 * Bits are:
7060 * + bits [7:4] contain the number of active transmitters
7061 * + bits [3:0] contain the number of active receivers
7062 * These are numbers 1 through 4 and can be different values if the
7063 * link is asymmetric.
7064 *
7065 * verify_cap_local_fm_link_width[0] retains its original value.
7066 */
7067static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7068 u16 *rx_width)
7069{
7070 u16 widths, tx, rx;
7071 u8 misc_bits, local_flags;
7072 u16 active_tx, active_rx;
7073
7074 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
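	/* the upper byte of widths is fm_link_width[1]: tx count in [15:12], rx count in [11:8] */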
7075 tx = widths >> 12;
7076 rx = (widths >> 8) & 0xf;
7077
7078 *tx_width = link_width_to_bits(dd, tx);
7079 *rx_width = link_width_to_bits(dd, rx);
7080
7081 /* print the active widths */
7082 get_link_widths(dd, &active_tx, &active_rx);
7083}
7084
7085/*
7086 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7087 * hardware information when the link first comes up.
7088 *
7089 * The link width is not available until after VerifyCap.AllFramesReceived
7090 * (the trigger for handle_verify_cap), so this is outside that routine
7091 * and should be called when the 8051 signals linkup.
7092 */
7093void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7094{
7095 u16 tx_width, rx_width;
7096
7097 /* get end-of-LNI link widths */
7098 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7099
7100 /* use tx_width as the link is supposed to be symmetric on link up */
7101 ppd->link_width_active = tx_width;
7102 /* link width downgrade active (LWD.A) starts out matching LW.A */
7103 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7104 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7105 /* per OPA spec, on link up LWD.E resets to LWD.S */
7106 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7107	/* cache the active egress rate (units: 10^6 bits/sec) */
7108 ppd->current_egress_rate = active_egress_rate(ppd);
7109}
7110
7111/*
7112 * Handle a verify capabilities interrupt from the 8051.
7113 *
7114 * This is a work-queue function outside of the interrupt.
7115 */
7116void handle_verify_cap(struct work_struct *work)
7117{
7118 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7119 link_vc_work);
7120 struct hfi1_devdata *dd = ppd->dd;
7121 u64 reg;
7122 u8 power_management;
7123 u8 continious;
7124 u8 vcu;
7125 u8 vau;
7126 u8 z;
7127 u16 vl15buf;
7128 u16 link_widths;
7129 u16 crc_mask;
7130 u16 crc_val;
7131 u16 device_id;
7132 u16 active_tx, active_rx;
7133 u8 partner_supported_crc;
7134 u8 remote_tx_rate;
7135 u8 device_rev;
7136
7137 set_link_state(ppd, HLS_VERIFY_CAP);
7138
7139 lcb_shutdown(dd, 0);
7140 adjust_lcb_for_fpga_serdes(dd);
7141
7142 /*
7143 * These are now valid:
7144 * remote VerifyCap fields in the general LNI config
7145 * CSR DC8051_STS_REMOTE_GUID
7146 * CSR DC8051_STS_REMOTE_NODE_TYPE
7147 * CSR DC8051_STS_REMOTE_FM_SECURITY
7148 * CSR DC8051_STS_REMOTE_PORT_NO
7149 */
7150
7151 read_vc_remote_phy(dd, &power_management, &continious);
7152 read_vc_remote_fabric(
7153 dd,
7154 &vau,
7155 &z,
7156 &vcu,
7157 &vl15buf,
7158 &partner_supported_crc);
7159 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7160 read_remote_device_id(dd, &device_id, &device_rev);
7161 /*
7162 * And the 'MgmtAllowed' information, which is exchanged during
 7163	 * LNI, is also available at this point.
7164 */
7165 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7166 /* print the active widths */
7167 get_link_widths(dd, &active_tx, &active_rx);
7168 dd_dev_info(dd,
7169 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7170 (int)power_management, (int)continious);
7171 dd_dev_info(dd,
7172 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7173 (int)vau,
7174 (int)z,
7175 (int)vcu,
7176 (int)vl15buf,
7177 (int)partner_supported_crc);
7178 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7179 (u32)remote_tx_rate, (u32)link_widths);
7180 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7181 (u32)device_id, (u32)device_rev);
7182 /*
7183 * The peer vAU value just read is the peer receiver value. HFI does
7184 * not support a transmit vAU of 0 (AU == 8). We advertised that
7185 * with Z=1 in the fabric capabilities sent to the peer. The peer
7186 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7187 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7188 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7189 * subject to the Z value exception.
7190 */
7191 if (vau == 0)
7192 vau = 1;
7193 set_up_vl15(dd, vau, vl15buf);
7194
7195 /* set up the LCB CRC mode */
7196 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7197
7198 /* order is important: use the lowest bit in common */
7199 if (crc_mask & CAP_CRC_14B)
7200 crc_val = LCB_CRC_14B;
7201 else if (crc_mask & CAP_CRC_48B)
7202 crc_val = LCB_CRC_48B;
7203 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7204 crc_val = LCB_CRC_12B_16B_PER_LANE;
7205 else
7206 crc_val = LCB_CRC_16B;
7207
7208 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7209 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7210 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7211
7212 /* set (14b only) or clear sideband credit */
7213 reg = read_csr(dd, SEND_CM_CTRL);
7214 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7215 write_csr(dd, SEND_CM_CTRL,
7216 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7217 } else {
7218 write_csr(dd, SEND_CM_CTRL,
7219 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7220 }
7221
7222 ppd->link_speed_active = 0; /* invalid value */
7223 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7224 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7225 switch (remote_tx_rate) {
7226 case 0:
7227 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7228 break;
7229 case 1:
7230 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7231 break;
7232 }
7233 } else {
7234 /* actual rate is highest bit of the ANDed rates */
7235 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7236
7237 if (rate & 2)
7238 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7239 else if (rate & 1)
7240 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7241 }
7242 if (ppd->link_speed_active == 0) {
7243 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7244 __func__, (int)remote_tx_rate);
7245 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7246 }
7247
7248 /*
7249 * Cache the values of the supported, enabled, and active
7250 * LTP CRC modes to return in 'portinfo' queries. But the bit
7251 * flags that are returned in the portinfo query differ from
7252 * what's in the link_crc_mask, crc_sizes, and crc_val
7253 * variables. Convert these here.
7254 */
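	/* port_ltp_crc_mode layout: supported modes in bits [11:8], enabled in [7:4], active in [3:0] */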
7255 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7256 /* supported crc modes */
7257 ppd->port_ltp_crc_mode |=
7258 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7259 /* enabled crc modes */
7260 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7261 /* active crc mode */
7262
7263 /* set up the remote credit return table */
7264 assign_remote_cm_au_table(dd, vcu);
7265
7266 /*
7267 * The LCB is reset on entry to handle_verify_cap(), so this must
7268 * be applied on every link up.
7269 *
7270 * Adjust LCB error kill enable to kill the link if
7271 * these RBUF errors are seen:
7272 * REPLAY_BUF_MBE_SMASK
7273 * FLIT_INPUT_BUF_MBE_SMASK
7274 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007275 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007276 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7277 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7278 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7279 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7280 }
7281
7282 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7283 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7284
7285 /* give 8051 access to the LCB CSRs */
7286 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7287 set_8051_lcb_access(dd);
7288
7289 ppd->neighbor_guid =
7290 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7291 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7292 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7293 ppd->neighbor_type =
7294 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7295 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7296 ppd->neighbor_fm_security =
7297 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7298 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7299 dd_dev_info(dd,
7300 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7301 ppd->neighbor_guid, ppd->neighbor_type,
7302 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7303 if (ppd->mgmt_allowed)
7304 add_full_mgmt_pkey(ppd);
7305
7306 /* tell the 8051 to go to LinkUp */
7307 set_link_state(ppd, HLS_GOING_UP);
7308}
7309
7310/*
7311 * Apply the link width downgrade enabled policy against the current active
7312 * link widths.
7313 *
7314 * Called when the enabled policy changes or the active link widths change.
7315 */
7316void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7317{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007318 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007319 int tries;
7320 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007321 u16 tx, rx;
7322
Dean Luick323fd782015-11-16 21:59:24 -05007323 /* use the hls lock to avoid a race with actual link up */
7324 tries = 0;
7325retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007326 mutex_lock(&ppd->hls_lock);
7327 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007328 if (!(ppd->host_link_state & HLS_UP)) {
7329 /* still going up..wait and retry */
7330 if (ppd->host_link_state & HLS_GOING_UP) {
7331 if (++tries < 1000) {
7332 mutex_unlock(&ppd->hls_lock);
7333 usleep_range(100, 120); /* arbitrary */
7334 goto retry;
7335 }
7336 dd_dev_err(ppd->dd,
7337 "%s: giving up waiting for link state change\n",
7338 __func__);
7339 }
7340 goto done;
7341 }
7342
7343 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007344
7345 if (refresh_widths) {
7346 get_link_widths(ppd->dd, &tx, &rx);
7347 ppd->link_width_downgrade_tx_active = tx;
7348 ppd->link_width_downgrade_rx_active = rx;
7349 }
7350
7351 if (lwde == 0) {
7352 /* downgrade is disabled */
7353
7354 /* bounce if not at starting active width */
7355 if ((ppd->link_width_active !=
7356 ppd->link_width_downgrade_tx_active)
7357 || (ppd->link_width_active !=
7358 ppd->link_width_downgrade_rx_active)) {
7359 dd_dev_err(ppd->dd,
7360 "Link downgrade is disabled and link has downgraded, downing link\n");
7361 dd_dev_err(ppd->dd,
7362 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7363 ppd->link_width_active,
7364 ppd->link_width_downgrade_tx_active,
7365 ppd->link_width_downgrade_rx_active);
7366 do_bounce = 1;
7367 }
7368 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7369 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7370 /* Tx or Rx is outside the enabled policy */
7371 dd_dev_err(ppd->dd,
7372 "Link is outside of downgrade allowed, downing link\n");
7373 dd_dev_err(ppd->dd,
7374 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7375 lwde,
7376 ppd->link_width_downgrade_tx_active,
7377 ppd->link_width_downgrade_rx_active);
7378 do_bounce = 1;
7379 }
7380
Dean Luick323fd782015-11-16 21:59:24 -05007381done:
7382 mutex_unlock(&ppd->hls_lock);
7383
Mike Marciniszyn77241052015-07-30 15:17:43 -04007384 if (do_bounce) {
7385 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7386 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7387 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007388 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007389 start_link(ppd);
7390 }
7391}
7392
7393/*
7394 * Handle a link downgrade interrupt from the 8051.
7395 *
7396 * This is a work-queue function outside of the interrupt.
7397 */
7398void handle_link_downgrade(struct work_struct *work)
7399{
7400 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7401 link_downgrade_work);
7402
7403 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7404 apply_link_downgrade_policy(ppd, 1);
7405}
7406
7407static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7408{
7409 return flag_string(buf, buf_len, flags, dcc_err_flags,
7410 ARRAY_SIZE(dcc_err_flags));
7411}
7412
7413static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7414{
7415 return flag_string(buf, buf_len, flags, lcb_err_flags,
7416 ARRAY_SIZE(lcb_err_flags));
7417}
7418
7419static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7420{
7421 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7422 ARRAY_SIZE(dc8051_err_flags));
7423}
7424
7425static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7426{
7427 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7428 ARRAY_SIZE(dc8051_info_err_flags));
7429}
7430
7431static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7432{
7433 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7434 ARRAY_SIZE(dc8051_info_host_msg_flags));
7435}
7436
7437static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7438{
7439 struct hfi1_pportdata *ppd = dd->pport;
7440 u64 info, err, host_msg;
7441 int queue_link_down = 0;
7442 char buf[96];
7443
7444 /* look at the flags */
7445 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7446 /* 8051 information set by firmware */
7447 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7448 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7449 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7450 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7451 host_msg = (info >>
7452 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7453 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7454
7455 /*
7456 * Handle error flags.
7457 */
7458 if (err & FAILED_LNI) {
7459 /*
7460 * LNI error indications are cleared by the 8051
7461 * only when starting polling. Only pay attention
7462 * to them when in the states that occur during
7463 * LNI.
7464 */
7465 if (ppd->host_link_state
7466 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7467 queue_link_down = 1;
7468 dd_dev_info(dd, "Link error: %s\n",
7469 dc8051_info_err_string(buf,
7470 sizeof(buf),
7471 err & FAILED_LNI));
7472 }
7473 err &= ~(u64)FAILED_LNI;
7474 }
Dean Luick6d014532015-12-01 15:38:23 -05007475		/* unknown frames can happen during LNI, just count */
7476 if (err & UNKNOWN_FRAME) {
7477 ppd->unknown_frame_count++;
7478 err &= ~(u64)UNKNOWN_FRAME;
7479 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007480 if (err) {
7481 /* report remaining errors, but do not do anything */
7482 dd_dev_err(dd, "8051 info error: %s\n",
7483 dc8051_info_err_string(buf, sizeof(buf), err));
7484 }
7485
7486 /*
7487 * Handle host message flags.
7488 */
7489 if (host_msg & HOST_REQ_DONE) {
7490 /*
7491 * Presently, the driver does a busy wait for
7492 * host requests to complete. This is only an
7493 * informational message.
7494 * NOTE: The 8051 clears the host message
7495 * information *on the next 8051 command*.
7496 * Therefore, when linkup is achieved,
7497 * this flag will still be set.
7498 */
7499 host_msg &= ~(u64)HOST_REQ_DONE;
7500 }
7501 if (host_msg & BC_SMA_MSG) {
7502 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7503 host_msg &= ~(u64)BC_SMA_MSG;
7504 }
7505 if (host_msg & LINKUP_ACHIEVED) {
7506 dd_dev_info(dd, "8051: Link up\n");
7507 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7508 host_msg &= ~(u64)LINKUP_ACHIEVED;
7509 }
7510 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007511 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007512 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7513 }
7514 if (host_msg & VERIFY_CAP_FRAME) {
7515 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7516 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7517 }
7518 if (host_msg & LINK_GOING_DOWN) {
7519 const char *extra = "";
7520 /* no downgrade action needed if going down */
7521 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7522 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7523 extra = " (ignoring downgrade)";
7524 }
7525 dd_dev_info(dd, "8051: Link down%s\n", extra);
7526 queue_link_down = 1;
7527 host_msg &= ~(u64)LINK_GOING_DOWN;
7528 }
7529 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7530 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7531 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7532 }
7533 if (host_msg) {
7534 /* report remaining messages, but do not do anything */
7535 dd_dev_info(dd, "8051 info host message: %s\n",
7536 dc8051_info_host_msg_string(buf, sizeof(buf),
7537 host_msg));
7538 }
7539
7540 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7541 }
7542 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7543 /*
7544 * Lost the 8051 heartbeat. If this happens, we
7545 * receive constant interrupts about it. Disable
7546 * the interrupt after the first.
7547 */
7548 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7549 write_csr(dd, DC_DC8051_ERR_EN,
7550 read_csr(dd, DC_DC8051_ERR_EN)
7551 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7552
7553 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7554 }
7555 if (reg) {
7556 /* report the error, but do not do anything */
7557 dd_dev_err(dd, "8051 error: %s\n",
7558 dc8051_err_string(buf, sizeof(buf), reg));
7559 }
7560
7561 if (queue_link_down) {
7562 /* if the link is already going down or disabled, do not
7563 * queue another */
7564 if ((ppd->host_link_state
7565 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7566 || ppd->link_enabled == 0) {
7567 dd_dev_info(dd, "%s: not queuing link down\n",
7568 __func__);
7569 } else {
7570 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7571 }
7572 }
7573}
7574
7575static const char * const fm_config_txt[] = {
7576[0] =
7577 "BadHeadDist: Distance violation between two head flits",
7578[1] =
7579 "BadTailDist: Distance violation between two tail flits",
7580[2] =
7581 "BadCtrlDist: Distance violation between two credit control flits",
7582[3] =
7583 "BadCrdAck: Credits return for unsupported VL",
7584[4] =
7585 "UnsupportedVLMarker: Received VL Marker",
7586[5] =
7587 "BadPreempt: Exceeded the preemption nesting level",
7588[6] =
7589 "BadControlFlit: Received unsupported control flit",
7590/* no 7 */
7591[8] =
7592 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7593};
7594
7595static const char * const port_rcv_txt[] = {
7596[1] =
7597 "BadPktLen: Illegal PktLen",
7598[2] =
7599 "PktLenTooLong: Packet longer than PktLen",
7600[3] =
7601 "PktLenTooShort: Packet shorter than PktLen",
7602[4] =
7603 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7604[5] =
7605 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7606[6] =
7607 "BadL2: Illegal L2 opcode",
7608[7] =
7609 "BadSC: Unsupported SC",
7610[9] =
7611 "BadRC: Illegal RC",
7612[11] =
7613 "PreemptError: Preempting with same VL",
7614[12] =
7615 "PreemptVL15: Preempting a VL15 packet",
7616};
7617
7618#define OPA_LDR_FMCONFIG_OFFSET 16
7619#define OPA_LDR_PORTRCV_OFFSET 0
7620static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7621{
7622 u64 info, hdr0, hdr1;
7623 const char *extra;
7624 char buf[96];
7625 struct hfi1_pportdata *ppd = dd->pport;
7626 u8 lcl_reason = 0;
7627 int do_bounce = 0;
7628
7629 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7630 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7631 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7632 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7633 /* set status bit */
7634 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7635 }
7636 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7637 }
7638
7639 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7640 struct hfi1_pportdata *ppd = dd->pport;
7641 /* this counter saturates at (2^32) - 1 */
7642 if (ppd->link_downed < (u32)UINT_MAX)
7643 ppd->link_downed++;
7644 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7645 }
7646
7647 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7648 u8 reason_valid = 1;
7649
7650 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7651 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7652 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7653 /* set status bit */
7654 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7655 }
7656 switch (info) {
7657 case 0:
7658 case 1:
7659 case 2:
7660 case 3:
7661 case 4:
7662 case 5:
7663 case 6:
7664 extra = fm_config_txt[info];
7665 break;
7666 case 8:
7667 extra = fm_config_txt[info];
7668 if (ppd->port_error_action &
7669 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7670 do_bounce = 1;
7671 /*
7672 * lcl_reason cannot be derived from info
7673 * for this error
7674 */
7675 lcl_reason =
7676 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7677 }
7678 break;
7679 default:
7680 reason_valid = 0;
7681 snprintf(buf, sizeof(buf), "reserved%lld", info);
7682 extra = buf;
7683 break;
7684 }
7685
7686 if (reason_valid && !do_bounce) {
7687 do_bounce = ppd->port_error_action &
7688 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7689 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7690 }
7691
7692 /* just report this */
7693 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7694 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7695 }
7696
7697 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7698 u8 reason_valid = 1;
7699
7700 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7701 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7702 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7703 if (!(dd->err_info_rcvport.status_and_code &
7704 OPA_EI_STATUS_SMASK)) {
7705 dd->err_info_rcvport.status_and_code =
7706 info & OPA_EI_CODE_SMASK;
7707 /* set status bit */
7708 dd->err_info_rcvport.status_and_code |=
7709 OPA_EI_STATUS_SMASK;
7710 /* save first 2 flits in the packet that caused
7711 * the error */
7712 dd->err_info_rcvport.packet_flit1 = hdr0;
7713 dd->err_info_rcvport.packet_flit2 = hdr1;
7714 }
7715 switch (info) {
7716 case 1:
7717 case 2:
7718 case 3:
7719 case 4:
7720 case 5:
7721 case 6:
7722 case 7:
7723 case 9:
7724 case 11:
7725 case 12:
7726 extra = port_rcv_txt[info];
7727 break;
7728 default:
7729 reason_valid = 0;
7730 snprintf(buf, sizeof(buf), "reserved%lld", info);
7731 extra = buf;
7732 break;
7733 }
7734
7735 if (reason_valid && !do_bounce) {
7736 do_bounce = ppd->port_error_action &
7737 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7738 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7739 }
7740
7741 /* just report this */
7742 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7743 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7744 hdr0, hdr1);
7745
7746 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7747 }
7748
7749 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7750 /* informative only */
7751 dd_dev_info(dd, "8051 access to LCB blocked\n");
7752 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7753 }
7754 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7755 /* informative only */
7756 dd_dev_info(dd, "host access to LCB blocked\n");
7757 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7758 }
7759
7760 /* report any remaining errors */
7761 if (reg)
7762 dd_dev_info(dd, "DCC Error: %s\n",
7763 dcc_err_string(buf, sizeof(buf), reg));
7764
7765 if (lcl_reason == 0)
7766 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7767
7768 if (do_bounce) {
7769 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7770 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7771 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7772 }
7773}
7774
7775static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7776{
7777 char buf[96];
7778
7779 dd_dev_info(dd, "LCB Error: %s\n",
7780 lcb_err_string(buf, sizeof(buf), reg));
7781}
7782
7783/*
7784 * CCE block DC interrupt. Source is < 8.
7785 */
7786static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7787{
7788 const struct err_reg_info *eri = &dc_errs[source];
7789
7790 if (eri->handler) {
7791 interrupt_clear_down(dd, 0, eri);
7792 } else if (source == 3 /* dc_lbm_int */) {
7793 /*
7794 * This indicates that a parity error has occurred on the
7795 * address/control lines presented to the LBM. The error
7796 * is a single pulse, there is no associated error flag,
7797 * and it is non-maskable. This is because if a parity
7798 * error occurs on the request the request is dropped.
7799 * This should never occur, but it is nice to know if it
7800 * ever does.
7801 */
7802 dd_dev_err(dd, "Parity error in DC LBM block\n");
7803 } else {
7804 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7805 }
7806}
7807
7808/*
7809 * TX block send credit interrupt. Source is < 160.
7810 */
7811static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7812{
7813 sc_group_release_update(dd, source);
7814}
7815
7816/*
7817 * TX block SDMA interrupt. Source is < 48.
7818 *
7819 * SDMA interrupts are grouped by type:
7820 *
7821 * 0 - N-1 = SDma
7822 * N - 2N-1 = SDmaProgress
7823 * 2N - 3N-1 = SDmaIdle
7824 */
7825static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7826{
7827 /* what interrupt */
7828 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7829 /* which engine */
7830 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7831
7832#ifdef CONFIG_SDMA_VERBOSITY
7833 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7834 slashstrip(__FILE__), __LINE__, __func__);
7835 sdma_dumpstate(&dd->per_sdma[which]);
7836#endif
7837
7838 if (likely(what < 3 && which < dd->num_sdma)) {
7839 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7840 } else {
7841 /* should not happen */
7842 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7843 }
7844}
7845
7846/*
7847 * RX block receive available interrupt. Source is < 160.
7848 */
7849static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7850{
7851 struct hfi1_ctxtdata *rcd;
7852 char *err_detail;
7853
7854 if (likely(source < dd->num_rcv_contexts)) {
7855 rcd = dd->rcd[source];
7856 if (rcd) {
7857 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007858 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007859 else
7860 handle_user_interrupt(rcd);
7861 return; /* OK */
7862 }
7863 /* received an interrupt, but no rcd */
7864 err_detail = "dataless";
7865 } else {
7866 /* received an interrupt, but are not using that context */
7867 err_detail = "out of range";
7868 }
7869 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7870 err_detail, source);
7871}
7872
7873/*
7874 * RX block receive urgent interrupt. Source is < 160.
7875 */
7876static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7877{
7878 struct hfi1_ctxtdata *rcd;
7879 char *err_detail;
7880
7881 if (likely(source < dd->num_rcv_contexts)) {
7882 rcd = dd->rcd[source];
7883 if (rcd) {
7884 /* only pay attention to user urgent interrupts */
7885 if (source >= dd->first_user_ctxt)
7886 handle_user_interrupt(rcd);
7887 return; /* OK */
7888 }
7889 /* received an interrupt, but no rcd */
7890 err_detail = "dataless";
7891 } else {
7892 /* received an interrupt, but are not using that context */
7893 err_detail = "out of range";
7894 }
7895 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7896 err_detail, source);
7897}
7898
7899/*
7900 * Reserved range interrupt. Should not be called in normal operation.
7901 */
7902static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7903{
7904 char name[64];
7905
7906 dd_dev_err(dd, "unexpected %s interrupt\n",
7907 is_reserved_name(name, sizeof(name), source));
7908}
7909
7910static const struct is_table is_table[] = {
7911/* start end
7912 name func interrupt func */
7913{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7914 is_misc_err_name, is_misc_err_int },
7915{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7916 is_sdma_eng_err_name, is_sdma_eng_err_int },
7917{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7918 is_sendctxt_err_name, is_sendctxt_err_int },
7919{ IS_SDMA_START, IS_SDMA_END,
7920 is_sdma_eng_name, is_sdma_eng_int },
7921{ IS_VARIOUS_START, IS_VARIOUS_END,
7922 is_various_name, is_various_int },
7923{ IS_DC_START, IS_DC_END,
7924 is_dc_name, is_dc_int },
7925{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7926 is_rcv_avail_name, is_rcv_avail_int },
7927{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7928 is_rcv_urgent_name, is_rcv_urgent_int },
7929{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7930 is_send_credit_name, is_send_credit_int},
7931{ IS_RESERVED_START, IS_RESERVED_END,
7932 is_reserved_name, is_reserved_int},
7933};
7934
7935/*
7936 * Interrupt source interrupt - called when the given source has an interrupt.
7937 * Source is a bit index into an array of 64-bit integers.
7938 */
7939static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7940{
7941 const struct is_table *entry;
7942
7943 /* avoids a double compare by walking the table in-order */
7944 for (entry = &is_table[0]; entry->is_name; entry++) {
7945 if (source < entry->end) {
7946 trace_hfi1_interrupt(dd, entry, source);
7947 entry->is_int(dd, source - entry->start);
7948 return;
7949 }
7950 }
7951 /* fell off the end */
7952 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7953}
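
/*
 * Illustrative sketch (not driver code, values hypothetical): a source bit
 * that falls in the receive-available range resolves to the IS_RCVAVAIL
 * table entry above, and the handler sees a 0-based index within its range.
 */
#if 0
	/* hypothetical source: 4th bit of the receive-available range */
	is_interrupt(dd, IS_RCVAVAIL_START + 3);
	/* ...walks the table and ends up calling is_rcv_avail_int(dd, 3) */
#endif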
7954
7955/*
7956 * General interrupt handler. This is able to correctly handle
7957 * all interrupts in case INTx is used.
7958 */
7959static irqreturn_t general_interrupt(int irq, void *data)
7960{
7961 struct hfi1_devdata *dd = data;
7962 u64 regs[CCE_NUM_INT_CSRS];
7963 u32 bit;
7964 int i;
7965
7966 this_cpu_inc(*dd->int_counter);
7967
7968 /* phase 1: scan and clear all handled interrupts */
7969 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7970 if (dd->gi_mask[i] == 0) {
7971 regs[i] = 0; /* used later */
7972 continue;
7973 }
7974 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7975 dd->gi_mask[i];
7976 /* only clear if anything is set */
7977 if (regs[i])
7978 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7979 }
7980
7981 /* phase 2: call the appropriate handler */
7982 for_each_set_bit(bit, (unsigned long *)&regs[0],
7983 CCE_NUM_INT_CSRS*64) {
7984 is_interrupt(dd, bit);
7985 }
7986
7987 return IRQ_HANDLED;
7988}
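
/*
 * Sketch (illustration only, nothing beyond the code above is assumed): the
 * flat bit number handed to is_interrupt() indexes the concatenated
 * CCE_INT_STATUS snapshot, so it maps back to a CSR and a bit as follows.
 */
#if 0
	unsigned int csr_index = bit / 64;	/* which 64-bit status CSR */
	unsigned int csr_bit = bit % 64;	/* bit within that CSR */
#endif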
7989
7990static irqreturn_t sdma_interrupt(int irq, void *data)
7991{
7992 struct sdma_engine *sde = data;
7993 struct hfi1_devdata *dd = sde->dd;
7994 u64 status;
7995
7996#ifdef CONFIG_SDMA_VERBOSITY
7997 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7998 slashstrip(__FILE__), __LINE__, __func__);
7999 sdma_dumpstate(sde);
8000#endif
8001
8002 this_cpu_inc(*dd->int_counter);
8003
8004 /* This read_csr is really bad in the hot path */
8005 status = read_csr(dd,
8006 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
8007 & sde->imask;
8008 if (likely(status)) {
8009 /* clear the interrupt(s) */
8010 write_csr(dd,
8011 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
8012 status);
8013
8014 /* handle the interrupt(s) */
8015 sdma_engine_interrupt(sde, status);
8016 } else
8017 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8018 sde->this_idx);
8019
8020 return IRQ_HANDLED;
8021}
8022
8023/*
Dean Luickf4f30031c2015-10-26 10:28:44 -04008024 * Clear the receive interrupt, forcing the write and making sure
8025 * we have data from the chip, pushing everything in front of it
8026 * back to the host.
8027 */
8028static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8029{
8030 struct hfi1_devdata *dd = rcd->dd;
8031 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8032
8033 mmiowb(); /* make sure everything before is written */
8034 write_csr(dd, addr, rcd->imask);
8035 /* force the above write on the chip and get a value back */
8036 (void)read_csr(dd, addr);
8037}
8038
8039/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008040void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008041{
8042 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8043}
8044
8045/* return non-zero if a packet is present */
8046static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8047{
8048 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8049 return (rcd->seq_cnt ==
8050 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8051
8052 /* else is RDMA rtail */
8053 return (rcd->head != get_rcvhdrtail(rcd));
8054}
8055
8056/*
8057 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8058 * This routine will try to handle packets immediately (latency), but if
8059 * it finds too many, it will invoke the thread handler (bandwidth). The
8060 * chip receive interrupt is *not* cleared down until this or the thread (if
8061 * invoked) is finished. The intent is to avoid extra interrupts while we
8062 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008063 */
8064static irqreturn_t receive_context_interrupt(int irq, void *data)
8065{
8066 struct hfi1_ctxtdata *rcd = data;
8067 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008068 int disposition;
8069 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008070
8071 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8072 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008073 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008074
Dean Luickf4f30031c2015-10-26 10:28:44 -04008075 /* receive interrupt remains blocked while processing packets */
8076 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008077
Dean Luickf4f30031c2015-10-26 10:28:44 -04008078 /*
8079 * Too many packets were seen while processing packets in this
8080 * IRQ handler. Invoke the handler thread. The receive interrupt
8081 * remains blocked.
8082 */
8083 if (disposition == RCV_PKT_LIMIT)
8084 return IRQ_WAKE_THREAD;
8085
8086 /*
8087 * The packet processor detected no more packets. Clear the receive
8088 * interrupt and recheck for a packet that may have arrived
8089 * after the previous check and interrupt clear. If a packet arrived,
8090 * force another interrupt.
8091 */
8092 clear_recv_intr(rcd);
8093 present = check_packet_present(rcd);
8094 if (present)
8095 force_recv_intr(rcd);
8096
8097 return IRQ_HANDLED;
8098}
8099
8100/*
8101 * Receive packet thread handler. This expects to be invoked with the
8102 * receive interrupt still blocked.
8103 */
8104static irqreturn_t receive_context_thread(int irq, void *data)
8105{
8106 struct hfi1_ctxtdata *rcd = data;
8107 int present;
8108
8109 /* receive interrupt is still blocked from the IRQ handler */
8110 (void)rcd->do_interrupt(rcd, 1);
8111
8112 /*
8113 * The packet processor will only return if it detected no more
8114 * packets. Hold IRQs here so we can safely clear the interrupt and
8115 * recheck for a packet that may have arrived after the previous
8116 * check and the interrupt clear. If a packet arrived, force another
8117 * interrupt.
8118 */
8119 local_irq_disable();
8120 clear_recv_intr(rcd);
8121 present = check_packet_present(rcd);
8122 if (present)
8123 force_recv_intr(rcd);
8124 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008125
8126 return IRQ_HANDLED;
8127}
8128
8129/* ========================================================================= */
8130
8131u32 read_physical_state(struct hfi1_devdata *dd)
8132{
8133 u64 reg;
8134
8135 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8136 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8137 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8138}
8139
Jim Snowfb9036d2016-01-11 18:32:21 -05008140u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008141{
8142 u64 reg;
8143
8144 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8145 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8146 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8147}
8148
8149static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8150{
8151 u64 reg;
8152
8153 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8154 /* clear current state, set new state */
8155 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8156 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8157 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8158}
8159
8160/*
8161 * Use the 8051 to read a LCB CSR.
8162 */
8163static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8164{
8165 u32 regno;
8166 int ret;
8167
8168 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8169 if (acquire_lcb_access(dd, 0) == 0) {
8170 *data = read_csr(dd, addr);
8171 release_lcb_access(dd, 0);
8172 return 0;
8173 }
8174 return -EBUSY;
8175 }
8176
8177 /* register is an index of LCB registers: (offset - base) / 8 */
8178 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8179 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8180 if (ret != HCMD_SUCCESS)
8181 return -EBUSY;
8182 return 0;
8183}
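
/*
 * Worked example (offset hypothetical, for illustration only): an LCB CSR
 * located 0x18 bytes above DC_LCB_CFG_RUN is presented to the 8051 as
 * register index 0x18 >> 3 = 3 in the HCMD_READ_LCB_CSR command above.
 */
#if 0
	u32 example_regno = ((DC_LCB_CFG_RUN + 0x18) - DC_LCB_CFG_RUN) >> 3; /* 3 */
#endif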
8184
8185/*
8186 * Read an LCB CSR. Access may not be in host control, so check.
8187 * Return 0 on success, -EBUSY on failure.
8188 */
8189int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8190{
8191 struct hfi1_pportdata *ppd = dd->pport;
8192
8193 /* if up, go through the 8051 for the value */
8194 if (ppd->host_link_state & HLS_UP)
8195 return read_lcb_via_8051(dd, addr, data);
8196 /* if going up or down, no access */
8197 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8198 return -EBUSY;
8199 /* otherwise, host has access */
8200 *data = read_csr(dd, addr);
8201 return 0;
8202}
8203
8204/*
8205 * Use the 8051 to write a LCB CSR.
8206 */
8207static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8208{
Dean Luick3bf40d62015-11-06 20:07:04 -05008209 u32 regno;
8210 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008211
Dean Luick3bf40d62015-11-06 20:07:04 -05008212 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8213 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8214 if (acquire_lcb_access(dd, 0) == 0) {
8215 write_csr(dd, addr, data);
8216 release_lcb_access(dd, 0);
8217 return 0;
8218 }
8219 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008220 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008221
8222 /* register is an index of LCB registers: (offset - base) / 8 */
8223 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8224 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8225 if (ret != HCMD_SUCCESS)
8226 return -EBUSY;
8227 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008228}
8229
8230/*
8231 * Write an LCB CSR. Access may not be in host control, so check.
8232 * Return 0 on success, -EBUSY on failure.
8233 */
8234int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8235{
8236 struct hfi1_pportdata *ppd = dd->pport;
8237
8238 /* if up, go through the 8051 for the value */
8239 if (ppd->host_link_state & HLS_UP)
8240 return write_lcb_via_8051(dd, addr, data);
8241 /* if going up or down, no access */
8242 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8243 return -EBUSY;
8244 /* otherwise, host has access */
8245 write_csr(dd, addr, data);
8246 return 0;
8247}
8248
8249/*
8250 * Returns:
8251 * < 0 = Linux error, not able to get access
8252 * > 0 = 8051 command RETURN_CODE
8253 */
8254static int do_8051_command(
8255 struct hfi1_devdata *dd,
8256 u32 type,
8257 u64 in_data,
8258 u64 *out_data)
8259{
8260 u64 reg, completed;
8261 int return_code;
8262 unsigned long flags;
8263 unsigned long timeout;
8264
8265 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8266
8267 /*
8268 * Alternative to holding the lock for a long time:
8269 * - keep busy wait - have other users bounce off
8270 */
8271 spin_lock_irqsave(&dd->dc8051_lock, flags);
8272
8273 /* We can't send any commands to the 8051 if it's in reset */
8274 if (dd->dc_shutdown) {
8275 return_code = -ENODEV;
8276 goto fail;
8277 }
8278
8279 /*
8280 * If an 8051 host command timed out previously, then the 8051 is
8281 * stuck.
8282 *
8283 * On first timeout, attempt to reset and restart the entire DC
8284 * block (including 8051). (Is this too big of a hammer?)
8285 *
8286 * If the 8051 times out a second time, the reset did not bring it
8287 * back to healthy life. In that case, fail any subsequent commands.
8288 */
8289 if (dd->dc8051_timed_out) {
8290 if (dd->dc8051_timed_out > 1) {
8291 dd_dev_err(dd,
8292 "Previous 8051 host command timed out, skipping command %u\n",
8293 type);
8294 return_code = -ENXIO;
8295 goto fail;
8296 }
8297 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8298 dc_shutdown(dd);
8299 dc_start(dd);
8300 spin_lock_irqsave(&dd->dc8051_lock, flags);
8301 }
8302
8303 /*
8304 * If there is no timeout, then the 8051 command interface is
8305 * waiting for a command.
8306 */
8307
8308 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008309 * When writing an LCB CSR, out_data contains the full value to
8310 * be written, while in_data contains the relative LCB
8311 * address in 7:0. Do the work here, rather than in the caller,
8312 * of distributing the write data to where it needs to go:
8313 *
8314 * Write data
8315 * 39:00 -> in_data[47:8]
8316 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8317 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8318 */
8319 if (type == HCMD_WRITE_LCB_CSR) {
8320 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8321 reg = ((((*out_data) >> 40) & 0xff) <<
8322 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8323 | ((((*out_data) >> 48) & 0xffff) <<
8324 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8325 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8326 }
8327
8328 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008329 * Do two writes: the first to stabilize the type and req_data, the
8330 * second to activate.
8331 */
8332 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8333 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8334 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8335 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8336 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8337 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8338 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8339
8340 /* wait for completion, alternate: interrupt */
8341 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8342 while (1) {
8343 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8344 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8345 if (completed)
8346 break;
8347 if (time_after(jiffies, timeout)) {
8348 dd->dc8051_timed_out++;
8349 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8350 if (out_data)
8351 *out_data = 0;
8352 return_code = -ETIMEDOUT;
8353 goto fail;
8354 }
8355 udelay(2);
8356 }
8357
8358 if (out_data) {
8359 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8360 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8361 if (type == HCMD_READ_LCB_CSR) {
8362 /* top 16 bits are in a different register */
8363 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8364 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8365 << (48
8366 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8367 }
8368 }
8369 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8370 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8371 dd->dc8051_timed_out = 0;
8372 /*
8373 * Clear command for next user.
8374 */
8375 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8376
8377fail:
8378 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8379
8380 return return_code;
8381}
8382
8383static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8384{
8385 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8386}
8387
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008388int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8389 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008390{
8391 u64 data;
8392 int ret;
8393
8394 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8395 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8396 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8397 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8398 if (ret != HCMD_SUCCESS) {
8399 dd_dev_err(dd,
8400 "load 8051 config: field id %d, lane %d, err %d\n",
8401 (int)field_id, (int)lane_id, ret);
8402 }
8403 return ret;
8404}
8405
8406/*
8407 * Read the 8051 firmware "registers". Use the RAM directly. Always
8408 * set the result, even on error.
8409 * Return 0 on success, -errno on failure
8410 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008411int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8412 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008413{
8414 u64 big_data;
8415 u32 addr;
8416 int ret;
8417
8418 /* address start depends on the lane_id */
8419 if (lane_id < 4)
8420 addr = (4 * NUM_GENERAL_FIELDS)
8421 + (lane_id * 4 * NUM_LANE_FIELDS);
8422 else
8423 addr = 0;
8424 addr += field_id * 4;
8425
8426 /* read is in 8-byte chunks, hardware will truncate the address down */
8427 ret = read_8051_data(dd, addr, 8, &big_data);
8428
8429 if (ret == 0) {
8430 /* extract the 4 bytes we want */
8431 if (addr & 0x4)
8432 *result = (u32)(big_data >> 32);
8433 else
8434 *result = (u32)big_data;
8435 } else {
8436 *result = 0;
8437 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8438 __func__, lane_id, field_id);
8439 }
8440
8441 return ret;
8442}
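
/*
 * Worked example (field/lane values hypothetical): reading field 3 of
 * lane 2 computes
 *   addr = 4 * NUM_GENERAL_FIELDS + 2 * 4 * NUM_LANE_FIELDS + 3 * 4
 * The hardware aligns the 8-byte RAM read down; bit 2 of addr then selects
 * the upper or lower 32-bit half of the returned chunk.
 */
#if 0
	u32 example_addr = (4 * NUM_GENERAL_FIELDS) +
			   (2 * 4 * NUM_LANE_FIELDS) + (3 * 4);
	int use_upper_half = !!(example_addr & 0x4);
#endif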
8443
8444static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8445 u8 continuous)
8446{
8447 u32 frame;
8448
8449 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8450 | power_management << POWER_MANAGEMENT_SHIFT;
8451 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8452 GENERAL_CONFIG, frame);
8453}
8454
8455static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8456 u16 vl15buf, u8 crc_sizes)
8457{
8458 u32 frame;
8459
8460 frame = (u32)vau << VAU_SHIFT
8461 | (u32)z << Z_SHIFT
8462 | (u32)vcu << VCU_SHIFT
8463 | (u32)vl15buf << VL15BUF_SHIFT
8464 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8465 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8466 GENERAL_CONFIG, frame);
8467}
8468
8469static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8470 u8 *flag_bits, u16 *link_widths)
8471{
8472 u32 frame;
8473
8474 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8475 &frame);
8476 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8477 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8478 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8479}
8480
8481static int write_vc_local_link_width(struct hfi1_devdata *dd,
8482 u8 misc_bits,
8483 u8 flag_bits,
8484 u16 link_widths)
8485{
8486 u32 frame;
8487
8488 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8489 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8490 | (u32)link_widths << LINK_WIDTH_SHIFT;
8491 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8492 frame);
8493}
8494
8495static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8496 u8 device_rev)
8497{
8498 u32 frame;
8499
8500 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8501 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8502 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8503}
8504
8505static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8506 u8 *device_rev)
8507{
8508 u32 frame;
8509
8510 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8511 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8512 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8513 & REMOTE_DEVICE_REV_MASK;
8514}
8515
8516void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8517{
8518 u32 frame;
8519
8520 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8521 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8522 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8523}
8524
8525static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8526 u8 *continuous)
8527{
8528 u32 frame;
8529
8530 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8531 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8532 & POWER_MANAGEMENT_MASK;
8533 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8534 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8535}
8536
8537static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8538 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8539{
8540 u32 frame;
8541
8542 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8543 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8544 *z = (frame >> Z_SHIFT) & Z_MASK;
8545 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8546 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8547 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8548}
8549
8550static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8551 u8 *remote_tx_rate,
8552 u16 *link_widths)
8553{
8554 u32 frame;
8555
8556 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8557 &frame);
8558 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8559 & REMOTE_TX_RATE_MASK;
8560 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8561}
8562
8563static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8564{
8565 u32 frame;
8566
8567 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8568 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8569}
8570
8571static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8572{
8573 u32 frame;
8574
8575 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8576 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8577}
8578
8579static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8580{
8581 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8582}
8583
8584static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8585{
8586 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8587}
8588
8589void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8590{
8591 u32 frame;
8592 int ret;
8593
8594 *link_quality = 0;
8595 if (dd->pport->host_link_state & HLS_UP) {
8596 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8597 &frame);
8598 if (ret == 0)
8599 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8600 & LINK_QUALITY_MASK;
8601 }
8602}
8603
8604static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8605{
8606 u32 frame;
8607
8608 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8609 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8610}
8611
8612static int read_tx_settings(struct hfi1_devdata *dd,
8613 u8 *enable_lane_tx,
8614 u8 *tx_polarity_inversion,
8615 u8 *rx_polarity_inversion,
8616 u8 *max_rate)
8617{
8618 u32 frame;
8619 int ret;
8620
8621 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8622 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8623 & ENABLE_LANE_TX_MASK;
8624 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8625 & TX_POLARITY_INVERSION_MASK;
8626 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8627 & RX_POLARITY_INVERSION_MASK;
8628 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8629 return ret;
8630}
8631
8632static int write_tx_settings(struct hfi1_devdata *dd,
8633 u8 enable_lane_tx,
8634 u8 tx_polarity_inversion,
8635 u8 rx_polarity_inversion,
8636 u8 max_rate)
8637{
8638 u32 frame;
8639
8640 /* no need to mask, all variable sizes match field widths */
8641 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8642 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8643 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8644 | max_rate << MAX_RATE_SHIFT;
8645 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8646}
8647
8648static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8649{
8650 u32 frame, version, prod_id;
8651 int ret, lane;
8652
8653 /* 4 lanes */
8654 for (lane = 0; lane < 4; lane++) {
8655 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8656 if (ret) {
8657 dd_dev_err(
8658 dd,
8659 "Unable to read lane %d firmware details\n",
8660 lane);
8661 continue;
8662 }
8663 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8664 & SPICO_ROM_VERSION_MASK;
8665 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8666 & SPICO_ROM_PROD_ID_MASK;
8667 dd_dev_info(dd,
8668 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8669 lane, version, prod_id);
8670 }
8671}
8672
8673/*
8674 * Read an idle LCB message.
8675 *
8676 * Returns 0 on success, -EINVAL on error
8677 */
8678static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8679{
8680 int ret;
8681
8682 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8683 type, data_out);
8684 if (ret != HCMD_SUCCESS) {
8685 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8686 (u32)type, ret);
8687 return -EINVAL;
8688 }
8689 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8690 /* return only the payload as we already know the type */
8691 *data_out >>= IDLE_PAYLOAD_SHIFT;
8692 return 0;
8693}
8694
8695/*
8696 * Read an idle SMA message. To be done in response to a notification from
8697 * the 8051.
8698 *
8699 * Returns 0 on success, -EINVAL on error
8700 */
8701static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8702{
8703 return read_idle_message(dd,
8704 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8705}
8706
8707/*
8708 * Send an idle LCB message.
8709 *
8710 * Returns 0 on success, -EINVAL on error
8711 */
8712static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8713{
8714 int ret;
8715
8716 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8717 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8718 if (ret != HCMD_SUCCESS) {
8719 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8720 data, ret);
8721 return -EINVAL;
8722 }
8723 return 0;
8724}
8725
8726/*
8727 * Send an idle SMA message.
8728 *
8729 * Returns 0 on success, -EINVAL on error
8730 */
8731int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8732{
8733 u64 data;
8734
8735 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8736 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8737 return send_idle_message(dd, data);
8738}
8739
8740/*
8741 * Initialize the LCB then do a quick link up. This may or may not be
8742 * in loopback.
8743 *
8744 * return 0 on success, -errno on error
8745 */
8746static int do_quick_linkup(struct hfi1_devdata *dd)
8747{
8748 u64 reg;
8749 unsigned long timeout;
8750 int ret;
8751
8752 lcb_shutdown(dd, 0);
8753
8754 if (loopback) {
8755 /* LCB_CFG_LOOPBACK.VAL = 2 */
8756 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8757 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8758 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8759 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8760 }
8761
8762 /* start the LCBs */
8763 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8764 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8765
8766 /* simulator only loopback steps */
8767 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8768 /* LCB_CFG_RUN.EN = 1 */
8769 write_csr(dd, DC_LCB_CFG_RUN,
8770 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8771
8772 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8773 timeout = jiffies + msecs_to_jiffies(10);
8774 while (1) {
8775 reg = read_csr(dd,
8776 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8777 if (reg)
8778 break;
8779 if (time_after(jiffies, timeout)) {
8780 dd_dev_err(dd,
8781 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8782 return -ETIMEDOUT;
8783 }
8784 udelay(2);
8785 }
8786
8787 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8788 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8789 }
8790
8791 if (!loopback) {
8792 /*
8793 * When doing quick linkup and not in loopback, both
8794 * sides must be done with LCB set-up before either
8795 * starts the quick linkup. Put a delay here so that
8796 * both sides can be started and have a chance to be
8797 * done with LCB set up before resuming.
8798 */
8799 dd_dev_err(dd,
8800 "Pausing for peer to be finished with LCB set up\n");
8801 msleep(5000);
8802 dd_dev_err(dd,
8803 "Continuing with quick linkup\n");
8804 }
8805
8806 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8807 set_8051_lcb_access(dd);
8808
8809 /*
8810 * State "quick" LinkUp request sets the physical link state to
8811 * LinkUp without a verify capability sequence.
8812 * This state is in simulator v37 and later.
8813 */
8814 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8815 if (ret != HCMD_SUCCESS) {
8816 dd_dev_err(dd,
8817 "%s: set physical link state to quick LinkUp failed with return %d\n",
8818 __func__, ret);
8819
8820 set_host_lcb_access(dd);
8821 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8822
8823 if (ret >= 0)
8824 ret = -EINVAL;
8825 return ret;
8826 }
8827
8828 return 0; /* success */
8829}
8830
8831/*
8832 * Set the SerDes to internal loopback mode.
8833 * Returns 0 on success, -errno on error.
8834 */
8835static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8836{
8837 int ret;
8838
8839 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8840 if (ret == HCMD_SUCCESS)
8841 return 0;
8842 dd_dev_err(dd,
8843 "Set physical link state to SerDes Loopback failed with return %d\n",
8844 ret);
8845 if (ret >= 0)
8846 ret = -EINVAL;
8847 return ret;
8848}
8849
8850/*
8851 * Do all special steps to set up loopback.
8852 */
8853static int init_loopback(struct hfi1_devdata *dd)
8854{
8855 dd_dev_info(dd, "Entering loopback mode\n");
8856
8857 /* all loopbacks should disable self GUID check */
8858 write_csr(dd, DC_DC8051_CFG_MODE,
8859 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8860
8861 /*
8862 * The simulator has only one loopback option - LCB. Switch
8863 * to that option, which includes quick link up.
8864 *
8865 * Accept all valid loopback values.
8866 */
8867 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8868 && (loopback == LOOPBACK_SERDES
8869 || loopback == LOOPBACK_LCB
8870 || loopback == LOOPBACK_CABLE)) {
8871 loopback = LOOPBACK_LCB;
8872 quick_linkup = 1;
8873 return 0;
8874 }
8875
8876 /* handle serdes loopback */
8877 if (loopback == LOOPBACK_SERDES) {
8878 /* internal serdes loopback needs quick linkup on RTL */
8879 if (dd->icode == ICODE_RTL_SILICON)
8880 quick_linkup = 1;
8881 return set_serdes_loopback_mode(dd);
8882 }
8883
8884 /* LCB loopback - handled at poll time */
8885 if (loopback == LOOPBACK_LCB) {
8886 quick_linkup = 1; /* LCB is always quick linkup */
8887
8888 /* not supported in emulation due to emulation RTL changes */
8889 if (dd->icode == ICODE_FPGA_EMULATION) {
8890 dd_dev_err(dd,
8891 "LCB loopback not supported in emulation\n");
8892 return -EINVAL;
8893 }
8894 return 0;
8895 }
8896
8897 /* external cable loopback requires no extra steps */
8898 if (loopback == LOOPBACK_CABLE)
8899 return 0;
8900
8901 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8902 return -EINVAL;
8903}
8904
8905/*
8906 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8907 * used in the Verify Capability link width attribute.
8908 */
8909static u16 opa_to_vc_link_widths(u16 opa_widths)
8910{
8911 int i;
8912 u16 result = 0;
8913
8914 static const struct link_bits {
8915 u16 from;
8916 u16 to;
8917 } opa_link_xlate[] = {
8918 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8919 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8920 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8921 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8922 };
8923
8924 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8925 if (opa_widths & opa_link_xlate[i].from)
8926 result |= opa_link_xlate[i].to;
8927 }
8928 return result;
8929}
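
/*
 * Example (illustration only): an enabled-width mask of
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates, via the table above,
 * to (1 << 0) | (1 << 3) = 0x9 in the Verify Capability encoding.
 */
#if 0
	u16 example_vc_widths = opa_to_vc_link_widths(OPA_LINK_WIDTH_1X |
						      OPA_LINK_WIDTH_4X); /* 0x9 */
#endif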
8930
8931/*
8932 * Set link attributes before moving to polling.
8933 */
8934static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8935{
8936 struct hfi1_devdata *dd = ppd->dd;
8937 u8 enable_lane_tx;
8938 u8 tx_polarity_inversion;
8939 u8 rx_polarity_inversion;
8940 int ret;
8941
8942 /* reset our fabric serdes to clear any lingering problems */
8943 fabric_serdes_reset(dd);
8944
8945 /* set the local tx rate - need to read-modify-write */
8946 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8947 &rx_polarity_inversion, &ppd->local_tx_rate);
8948 if (ret)
8949 goto set_local_link_attributes_fail;
8950
8951 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8952 /* set the tx rate to the fastest enabled */
8953 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8954 ppd->local_tx_rate = 1;
8955 else
8956 ppd->local_tx_rate = 0;
8957 } else {
8958 /* set the tx rate to all enabled */
8959 ppd->local_tx_rate = 0;
8960 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8961 ppd->local_tx_rate |= 2;
8962 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8963 ppd->local_tx_rate |= 1;
8964 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008965
8966 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008967 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8968 rx_polarity_inversion, ppd->local_tx_rate);
8969 if (ret != HCMD_SUCCESS)
8970 goto set_local_link_attributes_fail;
8971
8972 /*
8973 * DC supports continuous updates.
8974 */
8975 ret = write_vc_local_phy(dd, 0 /* no power management */,
8976 1 /* continuous updates */);
8977 if (ret != HCMD_SUCCESS)
8978 goto set_local_link_attributes_fail;
8979
8980 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8981 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8982 ppd->port_crc_mode_enabled);
8983 if (ret != HCMD_SUCCESS)
8984 goto set_local_link_attributes_fail;
8985
8986 ret = write_vc_local_link_width(dd, 0, 0,
8987 opa_to_vc_link_widths(ppd->link_width_enabled));
8988 if (ret != HCMD_SUCCESS)
8989 goto set_local_link_attributes_fail;
8990
8991 /* let peer know who we are */
8992 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8993 if (ret == HCMD_SUCCESS)
8994 return 0;
8995
8996set_local_link_attributes_fail:
8997 dd_dev_err(dd,
8998 "Failed to set local link attributes, return 0x%x\n",
8999 ret);
9000 return ret;
9001}
9002
9003/*
9004 * Call this to start the link. Schedule a retry if the cable is not
9005 * present or if unable to start polling. Do not do anything if the
9006 * link is disabled. Returns 0 if link is disabled or moved to polling
9007 */
9008int start_link(struct hfi1_pportdata *ppd)
9009{
9010 if (!ppd->link_enabled) {
9011 dd_dev_info(ppd->dd,
9012 "%s: stopping link start because link is disabled\n",
9013 __func__);
9014 return 0;
9015 }
9016 if (!ppd->driver_link_ready) {
9017 dd_dev_info(ppd->dd,
9018 "%s: stopping link start because driver is not ready\n",
9019 __func__);
9020 return 0;
9021 }
9022
9023 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9024 loopback == LOOPBACK_LCB ||
9025 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9026 return set_link_state(ppd, HLS_DN_POLL);
9027
9028 dd_dev_info(ppd->dd,
9029 "%s: stopping link start because no cable is present\n",
9030 __func__);
9031 return -EAGAIN;
9032}
9033
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009034static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9035{
9036 struct hfi1_devdata *dd = ppd->dd;
9037 u64 mask;
9038 unsigned long timeout;
9039
9040 /*
9041 * Check for QSFP interrupt for t_init (SFF 8679)
9042 */
9043 timeout = jiffies + msecs_to_jiffies(2000);
9044 while (1) {
9045 mask = read_csr(dd, dd->hfi1_id ?
9046 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9047 if (!(mask & QSFP_HFI0_INT_N)) {
9048 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9049 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9050 break;
9051 }
9052 if (time_after(jiffies, timeout)) {
9053 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9054 __func__);
9055 break;
9056 }
9057 udelay(2);
9058 }
9059}
9060
9061static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9062{
9063 struct hfi1_devdata *dd = ppd->dd;
9064 u64 mask;
9065
9066 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9067 if (enable)
9068 mask |= (u64)QSFP_HFI0_INT_N;
9069 else
9070 mask &= ~(u64)QSFP_HFI0_INT_N;
9071 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9072}
9073
9074void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009075{
9076 struct hfi1_devdata *dd = ppd->dd;
9077 u64 mask, qsfp_mask;
9078
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009079 /* Disable INT_N from triggering QSFP interrupts */
9080 set_qsfp_int_n(ppd, 0);
9081
9082 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009083 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009084 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009085 qsfp_mask |= mask;
9086 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009087 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009088
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009089 qsfp_mask = read_csr(dd, dd->hfi1_id ?
9090 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009091 qsfp_mask &= ~mask;
9092 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009093 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009094
9095 udelay(10);
9096
9097 qsfp_mask |= mask;
9098 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009099 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9100
9101 wait_for_qsfp_init(ppd);
9102
9103 /*
9104 * Allow INT_N to trigger the QSFP interrupt to watch
9105 * for alarms and warnings
9106 */
9107 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009108}
9109
9110static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9111 u8 *qsfp_interrupt_status)
9112{
9113 struct hfi1_devdata *dd = ppd->dd;
9114
9115 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9116 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9117 dd_dev_info(dd,
9118 "%s: QSFP cable on fire\n",
9119 __func__);
9120
9121 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9122 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9123 dd_dev_info(dd,
9124 "%s: QSFP cable temperature too low\n",
9125 __func__);
9126
9127 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9128 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9129 dd_dev_info(dd,
9130 "%s: QSFP supply voltage too high\n",
9131 __func__);
9132
9133 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9134 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9135 dd_dev_info(dd,
9136 "%s: QSFP supply voltage too low\n",
9137 __func__);
9138
9139 /* Byte 2 is vendor specific */
9140
9141 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9142 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9143 dd_dev_info(dd,
9144 "%s: Cable RX channel 1/2 power too high\n",
9145 __func__);
9146
9147 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9148 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9149 dd_dev_info(dd,
9150 "%s: Cable RX channel 1/2 power too low\n",
9151 __func__);
9152
9153 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9154 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9155 dd_dev_info(dd,
9156 "%s: Cable RX channel 3/4 power too high\n",
9157 __func__);
9158
9159 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9160 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9161 dd_dev_info(dd,
9162 "%s: Cable RX channel 3/4 power too low\n",
9163 __func__);
9164
9165 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9166 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9167 dd_dev_info(dd,
9168 "%s: Cable TX channel 1/2 bias too high\n",
9169 __func__);
9170
9171 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9172 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9173 dd_dev_info(dd,
9174 "%s: Cable TX channel 1/2 bias too low\n",
9175 __func__);
9176
9177 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9178 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9179 dd_dev_info(dd,
9180 "%s: Cable TX channel 3/4 bias too high\n",
9181 __func__);
9182
9183 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9184 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9185 dd_dev_info(dd,
9186 "%s: Cable TX channel 3/4 bias too low\n",
9187 __func__);
9188
9189 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9190 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9191 dd_dev_info(dd,
9192 "%s: Cable TX channel 1/2 power too high\n",
9193 __func__);
9194
9195 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9196 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9197 dd_dev_info(dd,
9198 "%s: Cable TX channel 1/2 power too low\n",
9199 __func__);
9200
9201 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9202 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9203 dd_dev_info(dd,
9204 "%s: Cable TX channel 3/4 power too high\n",
9205 __func__);
9206
9207 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9208 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9209 dd_dev_info(dd,
9210 "%s: Cable TX channel 3/4 power too low\n",
9211 __func__);
9212
9213 /* Bytes 9-10 and 11-12 are reserved */
9214 /* Bytes 13-15 are vendor specific */
9215
9216 return 0;
9217}
9218
Mike Marciniszyn77241052015-07-30 15:17:43 -04009219/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009220void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009221{
9222 struct qsfp_data *qd;
9223 struct hfi1_pportdata *ppd;
9224 struct hfi1_devdata *dd;
9225
9226 qd = container_of(work, struct qsfp_data, qsfp_work);
9227 ppd = qd->ppd;
9228 dd = ppd->dd;
9229
9230 /* Sanity check */
9231 if (!qsfp_mod_present(ppd))
9232 return;
9233
9234 /*
9235 * Turn DC back on after the cable has been
9236 * re-inserted. Up until now, the DC has been in
9237 * reset to save power.
9238 */
9239 dc_start(dd);
9240
9241 if (qd->cache_refresh_required) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009242
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009243 set_qsfp_int_n(ppd, 0);
9244
9245 wait_for_qsfp_init(ppd);
9246
9247 /*
9248 * Allow INT_N to trigger the QSFP interrupt to watch
9249 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009250 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009251 set_qsfp_int_n(ppd, 1);
9252
9253 tune_serdes(ppd);
9254
9255 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009256 }
9257
9258 if (qd->check_interrupt_flags) {
9259 u8 qsfp_interrupt_status[16] = {0,};
9260
9261 if (qsfp_read(ppd, dd->hfi1_id, 6,
9262 &qsfp_interrupt_status[0], 16) != 16) {
9263 dd_dev_info(dd,
9264 "%s: Failed to read status of QSFP module\n",
9265 __func__);
9266 } else {
9267 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009268
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009269 handle_qsfp_error_conditions(
9270 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009271 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9272 ppd->qsfp_info.check_interrupt_flags = 0;
9273 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9274 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009275 }
9276 }
9277}
9278
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009279static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009280{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009281 struct hfi1_pportdata *ppd = dd->pport;
9282 u64 qsfp_mask, cce_int_mask;
9283 const int qsfp1_int_smask = QSFP1_INT % 64;
9284 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009285
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009286 /*
9287 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9288 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9289 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9290 * the index of the appropriate CSR in the CCEIntMask CSR array
9291 */
9292 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9293 (8 * (QSFP1_INT / 64)));
9294 if (dd->hfi1_id) {
9295 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9296 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9297 cce_int_mask);
9298 } else {
9299 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9300 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9301 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009302 }
9303
Mike Marciniszyn77241052015-07-30 15:17:43 -04009304 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9305 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009306 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9307 qsfp_mask);
9308 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9309 qsfp_mask);
9310
9311 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009312
9313 /* Handle active low nature of INT_N and MODPRST_N pins */
9314 if (qsfp_mod_present(ppd))
9315 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9316 write_csr(dd,
9317 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9318 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009319}
9320
Dean Luickbbdeb332015-12-01 15:38:15 -05009321/*
9322 * Do a one-time initialize of the LCB block.
9323 */
9324static void init_lcb(struct hfi1_devdata *dd)
9325{
Dean Luicka59329d2016-02-03 14:32:31 -08009326 /* simulator does not correctly handle LCB cclk loopback, skip */
9327 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9328 return;
9329
Dean Luickbbdeb332015-12-01 15:38:15 -05009330 /* the DC has been reset earlier in the driver load */
9331
9332 /* set LCB for cclk loopback on the port */
9333 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9334 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9335 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9336 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9337 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9338 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9339 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9340}
9341
Mike Marciniszyn77241052015-07-30 15:17:43 -04009342int bringup_serdes(struct hfi1_pportdata *ppd)
9343{
9344 struct hfi1_devdata *dd = ppd->dd;
9345 u64 guid;
9346 int ret;
9347
9348 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9349 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9350
9351 guid = ppd->guid;
9352 if (!guid) {
9353 if (dd->base_guid)
9354 guid = dd->base_guid + ppd->port - 1;
9355 ppd->guid = guid;
9356 }
9357
Mike Marciniszyn77241052015-07-30 15:17:43 -04009358 /* Set linkinit_reason on power up per OPA spec */
9359 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9360
Dean Luickbbdeb332015-12-01 15:38:15 -05009361 /* one-time init of the LCB */
9362 init_lcb(dd);
9363
Mike Marciniszyn77241052015-07-30 15:17:43 -04009364 if (loopback) {
9365 ret = init_loopback(dd);
9366 if (ret < 0)
9367 return ret;
9368 }
9369
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009370 /* Tune the SerDes to a ballpark setting for
9371 * optimal signal and bit error rate.
9372 * This must be done before starting the link.
9373 */
9374 tune_serdes(ppd);
9375
Mike Marciniszyn77241052015-07-30 15:17:43 -04009376 return start_link(ppd);
9377}
9378
9379void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9380{
9381 struct hfi1_devdata *dd = ppd->dd;
9382
9383 /*
9384 * Shut down the link and keep it down. First clear the flag that
9385 * says the driver wants to allow the link to be up (driver_link_ready).
9386 * Then make sure the link is not automatically restarted
9387 * (link_enabled). Cancel any pending restart. And finally
9388 * go offline.
9389 */
9390 ppd->driver_link_ready = 0;
9391 ppd->link_enabled = 0;
9392
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009393 ppd->offline_disabled_reason =
9394 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009395 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9396 OPA_LINKDOWN_REASON_SMA_DISABLED);
9397 set_link_state(ppd, HLS_DN_OFFLINE);
9398
9399 /* disable the port */
9400 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9401}
9402
9403static inline int init_cpu_counters(struct hfi1_devdata *dd)
9404{
9405 struct hfi1_pportdata *ppd;
9406 int i;
9407
9408 ppd = (struct hfi1_pportdata *)(dd + 1);
9409 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009410 ppd->ibport_data.rvp.rc_acks = NULL;
9411 ppd->ibport_data.rvp.rc_qacks = NULL;
9412 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9413 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9414 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9415 if (!ppd->ibport_data.rvp.rc_acks ||
9416 !ppd->ibport_data.rvp.rc_delayed_comp ||
9417 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009418 return -ENOMEM;
9419 }
9420
9421 return 0;
9422}
9423
9424static const char * const pt_names[] = {
9425 "expected",
9426 "eager",
9427 "invalid"
9428};
9429
9430static const char *pt_name(u32 type)
9431{
9432 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9433}
9434
9435/*
9436 * index is the index into the receive array
9437 */
9438void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9439 u32 type, unsigned long pa, u16 order)
9440{
9441 u64 reg;
9442 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9443 (dd->kregbase + RCV_ARRAY));
9444
9445 if (!(dd->flags & HFI1_PRESENT))
9446 goto done;
9447
9448 if (type == PT_INVALID) {
9449 pa = 0;
9450 } else if (type > PT_INVALID) {
9451 dd_dev_err(dd,
9452 "unexpected receive array type %u for index %u, not handled\n",
9453 type, index);
9454 goto done;
9455 }
9456
9457 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9458 pt_name(type), index, pa, (unsigned long)order);
9459
9460#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9461 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9462 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9463 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9464 << RCV_ARRAY_RT_ADDR_SHIFT;
9465 writeq(reg, base + (index * 8));
9466
9467 if (type == PT_EAGER)
9468 /*
9469 * Eager entries are written one-by-one so we have to push them
9470 * after we write the entry.
9471 */
9472 flush_wc();
9473done:
9474 return;
9475}
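
/*
 * Sketch of the entry encoding (values hypothetical): an eager buffer at
 * physical address 0x12340000 programmed with order 2 packs into the
 * RcvArray entry as shown below; only the 4KB-aligned part of the address
 * is stored.
 */
#if 0
	u64 example_reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
		| (u64)2 << RCV_ARRAY_RT_BUF_SIZE_SHIFT
		| ((0x12340000UL >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
			<< RCV_ARRAY_RT_ADDR_SHIFT;
#endif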
9476
9477void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9478{
9479 struct hfi1_devdata *dd = rcd->dd;
9480 u32 i;
9481
9482 /* this could be optimized */
9483 for (i = rcd->eager_base; i < rcd->eager_base +
9484 rcd->egrbufs.alloced; i++)
9485 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9486
9487 for (i = rcd->expected_base;
9488 i < rcd->expected_base + rcd->expected_count; i++)
9489 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9490}
9491
9492int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9493 struct hfi1_ctxt_info *kinfo)
9494{
9495 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9496 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9497 return 0;
9498}
9499
9500struct hfi1_message_header *hfi1_get_msgheader(
9501 struct hfi1_devdata *dd, __le32 *rhf_addr)
9502{
9503 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9504
9505 return (struct hfi1_message_header *)
9506 (rhf_addr - dd->rhf_offset + offset);
9507}
9508
9509static const char * const ib_cfg_name_strings[] = {
9510 "HFI1_IB_CFG_LIDLMC",
9511 "HFI1_IB_CFG_LWID_DG_ENB",
9512 "HFI1_IB_CFG_LWID_ENB",
9513 "HFI1_IB_CFG_LWID",
9514 "HFI1_IB_CFG_SPD_ENB",
9515 "HFI1_IB_CFG_SPD",
9516 "HFI1_IB_CFG_RXPOL_ENB",
9517 "HFI1_IB_CFG_LREV_ENB",
9518 "HFI1_IB_CFG_LINKLATENCY",
9519 "HFI1_IB_CFG_HRTBT",
9520 "HFI1_IB_CFG_OP_VLS",
9521 "HFI1_IB_CFG_VL_HIGH_CAP",
9522 "HFI1_IB_CFG_VL_LOW_CAP",
9523 "HFI1_IB_CFG_OVERRUN_THRESH",
9524 "HFI1_IB_CFG_PHYERR_THRESH",
9525 "HFI1_IB_CFG_LINKDEFAULT",
9526 "HFI1_IB_CFG_PKEYS",
9527 "HFI1_IB_CFG_MTU",
9528 "HFI1_IB_CFG_LSTATE",
9529 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9530 "HFI1_IB_CFG_PMA_TICKS",
9531 "HFI1_IB_CFG_PORT"
9532};
9533
9534static const char *ib_cfg_name(int which)
9535{
9536 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9537 return "invalid";
9538 return ib_cfg_name_strings[which];
9539}
9540
9541int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9542{
9543 struct hfi1_devdata *dd = ppd->dd;
9544 int val = 0;
9545
9546 switch (which) {
9547 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9548 val = ppd->link_width_enabled;
9549 break;
9550 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9551 val = ppd->link_width_active;
9552 break;
9553 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9554 val = ppd->link_speed_enabled;
9555 break;
9556 case HFI1_IB_CFG_SPD: /* current Link speed */
9557 val = ppd->link_speed_active;
9558 break;
9559
9560 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9561 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9562 case HFI1_IB_CFG_LINKLATENCY:
9563 goto unimplemented;
9564
9565 case HFI1_IB_CFG_OP_VLS:
9566 val = ppd->vls_operational;
9567 break;
9568 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9569 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9570 break;
9571 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9572 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9573 break;
9574 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9575 val = ppd->overrun_threshold;
9576 break;
9577 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9578 val = ppd->phy_error_threshold;
9579 break;
9580 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9581 val = dd->link_default;
9582 break;
9583
9584 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9585 case HFI1_IB_CFG_PMA_TICKS:
9586 default:
9587unimplemented:
9588 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9589 dd_dev_info(
9590 dd,
9591 "%s: which %s: not implemented\n",
9592 __func__,
9593 ib_cfg_name(which));
9594 break;
9595 }
9596
9597 return val;
9598}
9599
9600/*
9601 * The largest MAD packet size.
9602 */
9603#define MAX_MAD_PACKET 2048
9604
9605/*
9606 * Return the maximum header bytes that can go on the _wire_
9607 * for this device. This count includes the ICRC which is
9608 * not part of the packet held in memory but it is appended
9609 * by the HW.
9610 * This is dependent on the device's receive header entry size.
9611 * HFI allows this to be set per-receive context, but the
9612 * driver presently enforces a global value.
9613 */
9614u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9615{
9616 /*
9617 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9618 * the Receive Header Entry Size minus the PBC (or RHF) size
9619 * plus one DW for the ICRC appended by HW.
9620 *
9621 * dd->rcd[0]->rcvhdrqentsize is in DW.
9622 * We use rcd[0] as all contexts will have the same value. Also,
9623 * the first kernel context would have been allocated by now so
9624 * we are guaranteed a valid value.
9625 */
9626 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9627}
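
/*
 * Worked example (rcvhdrqentsize value hypothetical): with a receive header
 * entry size of 32 DW, the formula above yields (32 - 2 + 1) << 2 = 124
 * bytes of maximum on-the-wire header, ICRC included.
 */
#if 0
	u32 example_max_hb = (32 - 2 /* PBC/RHF */ + 1 /* ICRC */) << 2; /* 124 */
#endif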
9628
9629/*
9630 * Set Send Length
9631 * @ppd - per port data
9632 *
9633 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9634 * registers compare against LRH.PktLen, so use the max bytes included
9635 * in the LRH.
9636 *
9637 * This routine changes all VL values except VL15, which it maintains at
9638 * the same value.
9639 */
9640static void set_send_length(struct hfi1_pportdata *ppd)
9641{
9642 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009643 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9644 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009645 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9646 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9647 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9648 int i;
9649
9650 for (i = 0; i < ppd->vls_supported; i++) {
9651 if (dd->vld[i].mtu > maxvlmtu)
9652 maxvlmtu = dd->vld[i].mtu;
9653 if (i <= 3)
9654 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9655 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9656 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9657 else
9658 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9659 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9660 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9661 }
9662 write_csr(dd, SEND_LEN_CHECK0, len1);
9663 write_csr(dd, SEND_LEN_CHECK1, len2);
9664 /* adjust kernel credit return thresholds based on new MTUs */
9665 /* all kernel receive contexts have the same hdrqentsize */
9666 for (i = 0; i < ppd->vls_supported; i++) {
9667 sc_set_cr_threshold(dd->vld[i].sc,
9668 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9669 dd->rcd[0]->rcvhdrqentsize));
9670 }
9671 sc_set_cr_threshold(dd->vld[15].sc,
9672 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9673 dd->rcd[0]->rcvhdrqentsize));
9674
9675 /* Adjust maximum MTU for the port in DC */
9676 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9677 (ilog2(maxvlmtu >> 8) + 1);
9678 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9679 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9680 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9681 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9682 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9683}
9684
9685static void set_lidlmc(struct hfi1_pportdata *ppd)
9686{
9687 int i;
9688 u64 sreg = 0;
9689 struct hfi1_devdata *dd = ppd->dd;
9690 u32 mask = ~((1U << ppd->lmc) - 1);
9691 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9692
9693 if (dd->hfi1_snoop.mode_flag)
9694 dd_dev_info(dd, "Set lid/lmc while snooping");
9695
9696 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9697 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9698 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9699 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9700 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9701 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9702 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9703
9704 /*
9705 * Iterate over all the send contexts and set their SLID check
9706 */
9707 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9708 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9709 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9710 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9711
9712 for (i = 0; i < dd->chip_send_contexts; i++) {
9713 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9714 i, (u32)sreg);
9715 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9716 }
9717
9718 /* Now we have to do the same thing for the sdma engines */
9719 sdma_update_lmc(dd, mask, ppd->lid);
9720}
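/*
 * Worked example for the LMC masking above (illustrative values only):
 * with lid = 0x1234 and lmc = 2, mask = ~((1 << 2) - 1) = 0xfffffffc,
 * so the SLID/DLID checks compare only the bits above the LMC range
 * (value = lid & mask = 0x1234) and accept LIDs 0x1234 through 0x1237.
 */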
9721
9722static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9723{
9724 unsigned long timeout;
9725 u32 curr_state;
9726
9727 timeout = jiffies + msecs_to_jiffies(msecs);
9728 while (1) {
9729 curr_state = read_physical_state(dd);
9730 if (curr_state == state)
9731 break;
9732 if (time_after(jiffies, timeout)) {
9733 dd_dev_err(dd,
9734 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9735 state, curr_state);
9736 return -ETIMEDOUT;
9737 }
9738 usleep_range(1950, 2050); /* sleep 2ms-ish */
9739 }
9740
9741 return 0;
9742}
9743
9744/*
9745 * Helper for set_link_state(). Do not call except from that routine.
9746 * Expects ppd->hls_mutex to be held.
9747 *
9748 * @rem_reason value to be sent to the neighbor
9749 *
9750 * LinkDownReasons only set if transition succeeds.
9751 */
9752static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9753{
9754 struct hfi1_devdata *dd = ppd->dd;
9755 u32 pstate, previous_state;
9756 u32 last_local_state;
9757 u32 last_remote_state;
9758 int ret;
9759 int do_transition;
9760 int do_wait;
9761
9762 previous_state = ppd->host_link_state;
9763 ppd->host_link_state = HLS_GOING_OFFLINE;
9764 pstate = read_physical_state(dd);
9765 if (pstate == PLS_OFFLINE) {
9766 do_transition = 0; /* in right state */
9767 do_wait = 0; /* ...no need to wait */
9768 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9769 do_transition = 0; /* in an offline transient state */
9770 do_wait = 1; /* ...wait for it to settle */
9771 } else {
9772 do_transition = 1; /* need to move to offline */
9773 do_wait = 1; /* ...will need to wait */
9774 }
9775
9776 if (do_transition) {
9777 ret = set_physical_link_state(dd,
9778 PLS_OFFLINE | (rem_reason << 8));
9779
9780 if (ret != HCMD_SUCCESS) {
9781 dd_dev_err(dd,
9782 "Failed to transition to Offline link state, return %d\n",
9783 ret);
9784 return -EINVAL;
9785 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009786 if (ppd->offline_disabled_reason ==
9787 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009788 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009789 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009790 }
9791
9792 if (do_wait) {
9793 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009794 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009795 if (ret < 0)
9796 return ret;
9797 }
9798
9799 /* make sure the logical state is also down */
9800 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9801
9802 /*
9803 * Now in charge of LCB - must be after the physical state is
9804 * offline.quiet and before host_link_state is changed.
9805 */
9806 set_host_lcb_access(dd);
9807 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9808 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9809
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009810 if (ppd->port_type == PORT_TYPE_QSFP &&
9811 ppd->qsfp_info.limiting_active &&
9812 qsfp_mod_present(ppd)) {
9813 set_qsfp_tx(ppd, 0);
9814 }
9815
Mike Marciniszyn77241052015-07-30 15:17:43 -04009816 /*
9817 * The LNI has a mandatory wait time after the physical state
9818 * moves to Offline.Quiet. The wait time may be different
9819 * depending on how the link went down. The 8051 firmware
9820 * will observe the needed wait time and only move to ready
9821 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009822 * is 6s, so wait that long and then at least 0.5s more for
9823 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009824 */
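	/*
	 * Worked total for the wait below (from the comment above):
	 * 6s quiet + 0.5s other transitions + 0.5s buffer = 7s, i.e.
	 * the 7000 ms timeout passed to wait_fm_ready().
	 */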
Dean Luick05087f3b2015-12-01 15:38:16 -05009825 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009826 if (ret) {
9827 dd_dev_err(dd,
9828 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9829 /* state is really offline, so make it so */
9830 ppd->host_link_state = HLS_DN_OFFLINE;
9831 return ret;
9832 }
9833
9834 /*
9835 * The state is now offline and the 8051 is ready to accept host
9836 * requests.
9837 * - change our state
9838 * - notify others if we were previously in a linkup state
9839 */
9840 ppd->host_link_state = HLS_DN_OFFLINE;
9841 if (previous_state & HLS_UP) {
9842 /* went down while link was up */
9843 handle_linkup_change(dd, 0);
9844 } else if (previous_state
9845 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9846 /* went down while attempting link up */
9847 /* byte 1 of last_*_state is the failure reason */
9848 read_last_local_state(dd, &last_local_state);
9849 read_last_remote_state(dd, &last_remote_state);
9850 dd_dev_err(dd,
9851 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9852 last_local_state, last_remote_state);
9853 }
9854
9855 /* the active link width (downgrade) is 0 on link down */
9856 ppd->link_width_active = 0;
9857 ppd->link_width_downgrade_tx_active = 0;
9858 ppd->link_width_downgrade_rx_active = 0;
9859 ppd->current_egress_rate = 0;
9860 return 0;
9861}
9862
9863/* return the link state name */
9864static const char *link_state_name(u32 state)
9865{
9866 const char *name;
9867 int n = ilog2(state);
9868 static const char * const names[] = {
9869 [__HLS_UP_INIT_BP] = "INIT",
9870 [__HLS_UP_ARMED_BP] = "ARMED",
9871 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9872 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9873 [__HLS_DN_POLL_BP] = "POLL",
9874 [__HLS_DN_DISABLE_BP] = "DISABLE",
9875 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9876 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9877 [__HLS_GOING_UP_BP] = "GOING_UP",
9878 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9879 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9880 };
9881
9882 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9883 return name ? name : "unknown";
9884}
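/*
 * Illustrative lookup (a sketch; it assumes only that each HLS_* value
 * is the single bit named by its __HLS_*_BP position, as the ilog2()
 * above implies):
 *
 *	link_state_name(HLS_UP_ACTIVE) == names[__HLS_UP_ACTIVE_BP] == "ACTIVE"
 */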
9885
9886/* return the link state reason name */
9887static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9888{
9889 if (state == HLS_UP_INIT) {
9890 switch (ppd->linkinit_reason) {
9891 case OPA_LINKINIT_REASON_LINKUP:
9892 return "(LINKUP)";
9893 case OPA_LINKINIT_REASON_FLAPPING:
9894 return "(FLAPPING)";
9895 case OPA_LINKINIT_OUTSIDE_POLICY:
9896 return "(OUTSIDE_POLICY)";
9897 case OPA_LINKINIT_QUARANTINED:
9898 return "(QUARANTINED)";
9899 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9900 return "(INSUFIC_CAPABILITY)";
9901 default:
9902 break;
9903 }
9904 }
9905 return "";
9906}
9907
9908/*
9909 * driver_physical_state - convert the driver's notion of a port's
9910 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9911 * Return -1 (converted to a u32) to indicate error.
9912 */
9913u32 driver_physical_state(struct hfi1_pportdata *ppd)
9914{
9915 switch (ppd->host_link_state) {
9916 case HLS_UP_INIT:
9917 case HLS_UP_ARMED:
9918 case HLS_UP_ACTIVE:
9919 return IB_PORTPHYSSTATE_LINKUP;
9920 case HLS_DN_POLL:
9921 return IB_PORTPHYSSTATE_POLLING;
9922 case HLS_DN_DISABLE:
9923 return IB_PORTPHYSSTATE_DISABLED;
9924 case HLS_DN_OFFLINE:
9925 return OPA_PORTPHYSSTATE_OFFLINE;
9926 case HLS_VERIFY_CAP:
9927 return IB_PORTPHYSSTATE_POLLING;
9928 case HLS_GOING_UP:
9929 return IB_PORTPHYSSTATE_POLLING;
9930 case HLS_GOING_OFFLINE:
9931 return OPA_PORTPHYSSTATE_OFFLINE;
9932 case HLS_LINK_COOLDOWN:
9933 return OPA_PORTPHYSSTATE_OFFLINE;
9934 case HLS_DN_DOWNDEF:
9935 default:
9936 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9937 ppd->host_link_state);
9938 return -1;
9939 }
9940}
9941
9942/*
9943 * driver_logical_state - convert the driver's notion of a port's
9944 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9945 * (converted to a u32) to indicate error.
9946 */
9947u32 driver_logical_state(struct hfi1_pportdata *ppd)
9948{
9949 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9950 return IB_PORT_DOWN;
9951
9952 switch (ppd->host_link_state & HLS_UP) {
9953 case HLS_UP_INIT:
9954 return IB_PORT_INIT;
9955 case HLS_UP_ARMED:
9956 return IB_PORT_ARMED;
9957 case HLS_UP_ACTIVE:
9958 return IB_PORT_ACTIVE;
9959 default:
9960 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9961 ppd->host_link_state);
9962 return -1;
9963 }
9964}
9965
9966void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9967 u8 neigh_reason, u8 rem_reason)
9968{
9969 if (ppd->local_link_down_reason.latest == 0 &&
9970 ppd->neigh_link_down_reason.latest == 0) {
9971 ppd->local_link_down_reason.latest = lcl_reason;
9972 ppd->neigh_link_down_reason.latest = neigh_reason;
9973 ppd->remote_link_down_reason = rem_reason;
9974 }
9975}
9976
9977/*
9978 * Change the physical and/or logical link state.
9979 *
9980 * Do not call this routine while inside an interrupt. It contains
9981 * calls to routines that can take multiple seconds to finish.
9982 *
9983 * Returns 0 on success, -errno on failure.
9984 */
9985int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9986{
9987 struct hfi1_devdata *dd = ppd->dd;
9988 struct ib_event event = {.device = NULL};
9989 int ret1, ret = 0;
9990 int was_up, is_down;
9991 int orig_new_state, poll_bounce;
9992
9993 mutex_lock(&ppd->hls_lock);
9994
9995 orig_new_state = state;
9996 if (state == HLS_DN_DOWNDEF)
9997 state = dd->link_default;
9998
9999 /* interpret poll -> poll as a link bounce */
10000 poll_bounce = ppd->host_link_state == HLS_DN_POLL
10001 && state == HLS_DN_POLL;
10002
10003 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10004 link_state_name(ppd->host_link_state),
10005 link_state_name(orig_new_state),
10006 poll_bounce ? "(bounce) " : "",
10007 link_state_reason_name(ppd, state));
10008
10009 was_up = !!(ppd->host_link_state & HLS_UP);
10010
10011 /*
10012 * If we're going to a (HLS_*) link state that implies the logical
10013 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10014 * reset is_sm_config_started to 0.
10015 */
10016 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10017 ppd->is_sm_config_started = 0;
10018
10019 /*
10020 * Do nothing if the states match. Let a poll to poll link bounce
10021 * go through.
10022 */
10023 if (ppd->host_link_state == state && !poll_bounce)
10024 goto done;
10025
10026 switch (state) {
10027 case HLS_UP_INIT:
10028 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
10029 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10030 /*
10031 * Quick link up jumps from polling to here.
10032 *
10033 * Whether in normal or loopback mode, the
10034 * simulator jumps from polling to link up.
10035 * Accept that here.
10036 */
10037 /* OK */;
10038 } else if (ppd->host_link_state != HLS_GOING_UP) {
10039 goto unexpected;
10040 }
10041
10042 ppd->host_link_state = HLS_UP_INIT;
10043 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10044 if (ret) {
10045 /* logical state didn't change, stay at going_up */
10046 ppd->host_link_state = HLS_GOING_UP;
10047 dd_dev_err(dd,
10048 "%s: logical state did not change to INIT\n",
10049 __func__);
10050 } else {
10051 /* clear old transient LINKINIT_REASON code */
10052 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10053 ppd->linkinit_reason =
10054 OPA_LINKINIT_REASON_LINKUP;
10055
10056 /* enable the port */
10057 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10058
10059 handle_linkup_change(dd, 1);
10060 }
10061 break;
10062 case HLS_UP_ARMED:
10063 if (ppd->host_link_state != HLS_UP_INIT)
10064 goto unexpected;
10065
10066 ppd->host_link_state = HLS_UP_ARMED;
10067 set_logical_state(dd, LSTATE_ARMED);
10068 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10069 if (ret) {
10070 /* logical state didn't change, stay at init */
10071 ppd->host_link_state = HLS_UP_INIT;
10072 dd_dev_err(dd,
10073 "%s: logical state did not change to ARMED\n",
10074 __func__);
10075 }
10076 /*
10077 * The simulator does not currently implement SMA messages,
10078 * so neighbor_normal is not set. Set it here when we first
10079 * move to Armed.
10080 */
10081 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10082 ppd->neighbor_normal = 1;
10083 break;
10084 case HLS_UP_ACTIVE:
10085 if (ppd->host_link_state != HLS_UP_ARMED)
10086 goto unexpected;
10087
10088 ppd->host_link_state = HLS_UP_ACTIVE;
10089 set_logical_state(dd, LSTATE_ACTIVE);
10090 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10091 if (ret) {
10092 /* logical state didn't change, stay at armed */
10093 ppd->host_link_state = HLS_UP_ARMED;
10094 dd_dev_err(dd,
10095 "%s: logical state did not change to ACTIVE\n",
10096 __func__);
10097 } else {
10098
10099 /* tell all engines to go running */
10100 sdma_all_running(dd);
10101
10102 /* Signal the IB layer that the port has gone active */

Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010103 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010104 event.element.port_num = ppd->port;
10105 event.event = IB_EVENT_PORT_ACTIVE;
10106 }
10107 break;
10108 case HLS_DN_POLL:
10109 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10110 ppd->host_link_state == HLS_DN_OFFLINE) &&
10111 dd->dc_shutdown)
10112 dc_start(dd);
10113 /* Hand LED control to the DC */
10114 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10115
10116 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10117 u8 tmp = ppd->link_enabled;
10118
10119 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10120 if (ret) {
10121 ppd->link_enabled = tmp;
10122 break;
10123 }
10124 ppd->remote_link_down_reason = 0;
10125
10126 if (ppd->driver_link_ready)
10127 ppd->link_enabled = 1;
10128 }
10129
Jim Snowfb9036d2016-01-11 18:32:21 -050010130 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010131 ret = set_local_link_attributes(ppd);
10132 if (ret)
10133 break;
10134
10135 ppd->port_error_action = 0;
10136 ppd->host_link_state = HLS_DN_POLL;
10137
10138 if (quick_linkup) {
10139 /* quick linkup does not go into polling */
10140 ret = do_quick_linkup(dd);
10141 } else {
10142 ret1 = set_physical_link_state(dd, PLS_POLLING);
10143 if (ret1 != HCMD_SUCCESS) {
10144 dd_dev_err(dd,
10145 "Failed to transition to Polling link state, return 0x%x\n",
10146 ret1);
10147 ret = -EINVAL;
10148 }
10149 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010150 ppd->offline_disabled_reason =
10151 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010152 /*
10153 * If an error occurred above, go back to offline. The
10154 * caller may reschedule another attempt.
10155 */
10156 if (ret)
10157 goto_offline(ppd, 0);
10158 break;
10159 case HLS_DN_DISABLE:
10160 /* link is disabled */
10161 ppd->link_enabled = 0;
10162
10163 /* allow any state to transition to disabled */
10164
10165 /* must transition to offline first */
10166 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10167 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10168 if (ret)
10169 break;
10170 ppd->remote_link_down_reason = 0;
10171 }
10172
10173 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10174 if (ret1 != HCMD_SUCCESS) {
10175 dd_dev_err(dd,
10176 "Failed to transition to Disabled link state, return 0x%x\n",
10177 ret1);
10178 ret = -EINVAL;
10179 break;
10180 }
10181 ppd->host_link_state = HLS_DN_DISABLE;
10182 dc_shutdown(dd);
10183 break;
10184 case HLS_DN_OFFLINE:
10185 if (ppd->host_link_state == HLS_DN_DISABLE)
10186 dc_start(dd);
10187
10188 /* allow any state to transition to offline */
10189 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10190 if (!ret)
10191 ppd->remote_link_down_reason = 0;
10192 break;
10193 case HLS_VERIFY_CAP:
10194 if (ppd->host_link_state != HLS_DN_POLL)
10195 goto unexpected;
10196 ppd->host_link_state = HLS_VERIFY_CAP;
10197 break;
10198 case HLS_GOING_UP:
10199 if (ppd->host_link_state != HLS_VERIFY_CAP)
10200 goto unexpected;
10201
10202 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10203 if (ret1 != HCMD_SUCCESS) {
10204 dd_dev_err(dd,
10205 "Failed to transition to link up state, return 0x%x\n",
10206 ret1);
10207 ret = -EINVAL;
10208 break;
10209 }
10210 ppd->host_link_state = HLS_GOING_UP;
10211 break;
10212
10213 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10214 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10215 default:
10216 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10217 __func__, state);
10218 ret = -EINVAL;
10219 break;
10220 }
10221
10222 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10223 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10224
10225 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10226 ppd->neigh_link_down_reason.sma == 0) {
10227 ppd->local_link_down_reason.sma =
10228 ppd->local_link_down_reason.latest;
10229 ppd->neigh_link_down_reason.sma =
10230 ppd->neigh_link_down_reason.latest;
10231 }
10232
10233 goto done;
10234
10235unexpected:
10236 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10237 __func__, link_state_name(ppd->host_link_state),
10238 link_state_name(state));
10239 ret = -EINVAL;
10240
10241done:
10242 mutex_unlock(&ppd->hls_lock);
10243
10244 if (event.device)
10245 ib_dispatch_event(&event);
10246
10247 return ret;
10248}
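/*
 * Usage sketch (illustrative only; the surrounding caller is
 * hypothetical and not part of this file):
 *
 *	// process context only -- this routine can sleep for seconds
 *	ret = set_link_state(ppd, HLS_DN_POLL);
 *	if (ret)
 *		dd_dev_err(ppd->dd, "failed to start polling: %d\n", ret);
 */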
10249
10250int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10251{
10252 u64 reg;
10253 int ret = 0;
10254
10255 switch (which) {
10256 case HFI1_IB_CFG_LIDLMC:
10257 set_lidlmc(ppd);
10258 break;
10259 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10260 /*
10261 * The VL Arbitrator high limit is sent in units of 4k
10262 * bytes, while HFI stores it in units of 64 bytes.
10263 */
10264 val *= 4096/64;
10265 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10266 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10267 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10268 break;
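		/*
		 * Worked conversion for the case above (illustrative input):
		 * a high limit of 2 (i.e. 2 * 4 KB = 8 KB) becomes
		 * 2 * 4096 / 64 = 128 units of 64 bytes in the CSR.
		 */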
10269 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10270 /* HFI only supports POLL as the default link down state */
10271 if (val != HLS_DN_POLL)
10272 ret = -EINVAL;
10273 break;
10274 case HFI1_IB_CFG_OP_VLS:
10275 if (ppd->vls_operational != val) {
10276 ppd->vls_operational = val;
10277 if (!ppd->port)
10278 ret = -EINVAL;
10279 else
10280 ret = sdma_map_init(
10281 ppd->dd,
10282 ppd->port - 1,
10283 val,
10284 NULL);
10285 }
10286 break;
10287 /*
10288 * For link width, link width downgrade, and speed enable, always AND
10289 * the setting with what is actually supported. This has two benefits.
10290 * First, enabled can't have unsupported values, no matter what the
10291 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10292 * "fill in with your supported value" have all the bits in the
10293 * field set, so simply ANDing with supported has the desired result.
10294 */
10295 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10296 ppd->link_width_enabled = val & ppd->link_width_supported;
10297 break;
10298 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10299 ppd->link_width_downgrade_enabled =
10300 val & ppd->link_width_downgrade_supported;
10301 break;
10302 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10303 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10304 break;
10305 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10306 /*
10307 * HFI does not follow IB specs, save this value
10308 * so we can report it, if asked.
10309 */
10310 ppd->overrun_threshold = val;
10311 break;
10312 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10313 /*
10314 * HFI does not follow IB specs, save this value
10315 * so we can report it, if asked.
10316 */
10317 ppd->phy_error_threshold = val;
10318 break;
10319
10320 case HFI1_IB_CFG_MTU:
10321 set_send_length(ppd);
10322 break;
10323
10324 case HFI1_IB_CFG_PKEYS:
10325 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10326 set_partition_keys(ppd);
10327 break;
10328
10329 default:
10330 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10331 dd_dev_info(ppd->dd,
10332 "%s: which %s, val 0x%x: not implemented\n",
10333 __func__, ib_cfg_name(which), val);
10334 break;
10335 }
10336 return ret;
10337}
10338
10339/* begin functions related to vl arbitration table caching */
10340static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10341{
10342 int i;
10343
10344 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10345 VL_ARB_LOW_PRIO_TABLE_SIZE);
10346 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10347 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10348
10349 /*
10350 * Note that we always return values directly from the
10351 * 'vl_arb_cache' (and do no CSR reads) in response to a
10352 * 'Get(VLArbTable)'. This is obviously correct after a
10353 * 'Set(VLArbTable)', since the cache will then be up to
10354 * date. But it's also correct prior to any 'Set(VLArbTable)'
10355 * since then both the cache, and the relevant h/w registers
10356 * will be zeroed.
10357 */
10358
10359 for (i = 0; i < MAX_PRIO_TABLE; i++)
10360 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10361}
10362
10363/*
10364 * vl_arb_lock_cache
10365 *
10366 * All other vl_arb_* functions should be called only after locking
10367 * the cache.
10368 */
10369static inline struct vl_arb_cache *
10370vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10371{
10372 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10373 return NULL;
10374 spin_lock(&ppd->vl_arb_cache[idx].lock);
10375 return &ppd->vl_arb_cache[idx];
10376}
10377
10378static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10379{
10380 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10381}
10382
10383static void vl_arb_get_cache(struct vl_arb_cache *cache,
10384 struct ib_vl_weight_elem *vl)
10385{
10386 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10387}
10388
10389static void vl_arb_set_cache(struct vl_arb_cache *cache,
10390 struct ib_vl_weight_elem *vl)
10391{
10392 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10393}
10394
10395static int vl_arb_match_cache(struct vl_arb_cache *cache,
10396 struct ib_vl_weight_elem *vl)
10397{
10398 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10399}
10400/* end functions related to vl arbitration table caching */
10401
10402static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10403 u32 size, struct ib_vl_weight_elem *vl)
10404{
10405 struct hfi1_devdata *dd = ppd->dd;
10406 u64 reg;
10407 unsigned int i, is_up = 0;
10408 int drain, ret = 0;
10409
10410 mutex_lock(&ppd->hls_lock);
10411
10412 if (ppd->host_link_state & HLS_UP)
10413 is_up = 1;
10414
10415 drain = !is_ax(dd) && is_up;
10416
10417 if (drain)
10418 /*
10419 * Before adjusting VL arbitration weights, empty per-VL
10420 * FIFOs, otherwise a packet whose VL weight is being
10421 * set to 0 could get stuck in a FIFO with no chance to
10422 * egress.
10423 */
10424 ret = stop_drain_data_vls(dd);
10425
10426 if (ret) {
10427 dd_dev_err(
10428 dd,
10429 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10430 __func__);
10431 goto err;
10432 }
10433
10434 for (i = 0; i < size; i++, vl++) {
10435 /*
10436 * NOTE: The low priority shift and mask are used here, but
10437 * they are the same for both the low and high registers.
10438 */
10439 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10440 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10441 | (((u64)vl->weight
10442 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10443 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10444 write_csr(dd, target + (i * 8), reg);
10445 }
10446 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10447
10448 if (drain)
10449 open_fill_data_vls(dd); /* reopen all VLs */
10450
10451err:
10452 mutex_unlock(&ppd->hls_lock);
10453
10454 return ret;
10455}
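/*
 * Worked register layout for the loop above (a sketch with assumed
 * values): an entry { .vl = 3, .weight = 64 } is packed as
 *
 *	(3 << SEND_LOW_PRIORITY_LIST_VL_SHIFT) |
 *	(64 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT)
 *
 * and written to target + (i * 8), each list entry being an 8 byte CSR.
 */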
10456
10457/*
10458 * Read one credit merge VL register.
10459 */
10460static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10461 struct vl_limit *vll)
10462{
10463 u64 reg = read_csr(dd, csr);
10464
10465 vll->dedicated = cpu_to_be16(
10466 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10467 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10468 vll->shared = cpu_to_be16(
10469 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10470 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10471}
10472
10473/*
10474 * Read the current credit merge limits.
10475 */
10476static int get_buffer_control(struct hfi1_devdata *dd,
10477 struct buffer_control *bc, u16 *overall_limit)
10478{
10479 u64 reg;
10480 int i;
10481
10482 /* not all entries are filled in */
10483 memset(bc, 0, sizeof(*bc));
10484
10485 /* OPA and HFI have a 1-1 mapping */
10486 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10487 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10488
10489 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10490 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10491
10492 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10493 bc->overall_shared_limit = cpu_to_be16(
10494 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10495 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10496 if (overall_limit)
10497 *overall_limit = (reg
10498 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10499 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10500 return sizeof(struct buffer_control);
10501}
10502
10503static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10504{
10505 u64 reg;
10506 int i;
10507
10508 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10509 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10510 for (i = 0; i < sizeof(u64); i++) {
10511 u8 byte = *(((u8 *)&reg) + i);
10512
10513 dp->vlnt[2 * i] = byte & 0xf;
10514 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10515 }
10516
10517 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10518 for (i = 0; i < sizeof(u64); i++) {
10519 u8 byte = *(((u8 *)&reg) + i);
10520
10521 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10522 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10523 }
10524 return sizeof(struct sc2vlnt);
10525}
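/*
 * Worked unpacking for get_sc2vlnt() above (illustrative byte value):
 * if byte i of DCC_CFG_SC_VL_TABLE_15_0 reads 0x21, then
 *
 *	dp->vlnt[2 * i]     = 0x21 & 0xf         = 1  (even-numbered SC)
 *	dp->vlnt[2 * i + 1] = (0x21 & 0xf0) >> 4 = 2  (odd-numbered SC)
 *
 * i.e. the low nibble maps the even SC, the high nibble the odd one.
 */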
10526
10527static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10528 struct ib_vl_weight_elem *vl)
10529{
10530 unsigned int i;
10531
10532 for (i = 0; i < nelems; i++, vl++) {
10533 vl->vl = 0xf;
10534 vl->weight = 0;
10535 }
10536}
10537
10538static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10539{
10540 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10541 DC_SC_VL_VAL(15_0,
10542 0, dp->vlnt[0] & 0xf,
10543 1, dp->vlnt[1] & 0xf,
10544 2, dp->vlnt[2] & 0xf,
10545 3, dp->vlnt[3] & 0xf,
10546 4, dp->vlnt[4] & 0xf,
10547 5, dp->vlnt[5] & 0xf,
10548 6, dp->vlnt[6] & 0xf,
10549 7, dp->vlnt[7] & 0xf,
10550 8, dp->vlnt[8] & 0xf,
10551 9, dp->vlnt[9] & 0xf,
10552 10, dp->vlnt[10] & 0xf,
10553 11, dp->vlnt[11] & 0xf,
10554 12, dp->vlnt[12] & 0xf,
10555 13, dp->vlnt[13] & 0xf,
10556 14, dp->vlnt[14] & 0xf,
10557 15, dp->vlnt[15] & 0xf));
10558 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10559 DC_SC_VL_VAL(31_16,
10560 16, dp->vlnt[16] & 0xf,
10561 17, dp->vlnt[17] & 0xf,
10562 18, dp->vlnt[18] & 0xf,
10563 19, dp->vlnt[19] & 0xf,
10564 20, dp->vlnt[20] & 0xf,
10565 21, dp->vlnt[21] & 0xf,
10566 22, dp->vlnt[22] & 0xf,
10567 23, dp->vlnt[23] & 0xf,
10568 24, dp->vlnt[24] & 0xf,
10569 25, dp->vlnt[25] & 0xf,
10570 26, dp->vlnt[26] & 0xf,
10571 27, dp->vlnt[27] & 0xf,
10572 28, dp->vlnt[28] & 0xf,
10573 29, dp->vlnt[29] & 0xf,
10574 30, dp->vlnt[30] & 0xf,
10575 31, dp->vlnt[31] & 0xf));
10576}
10577
10578static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10579 u16 limit)
10580{
10581 if (limit != 0)
10582 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10583 what, (int)limit, idx);
10584}
10585
10586/* change only the shared limit portion of SendCmGlobalCredit */
10587static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10588{
10589 u64 reg;
10590
10591 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10592 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10593 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10594 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10595}
10596
10597/* change only the total credit limit portion of SendCmGlobalCredit */
10598static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10599{
10600 u64 reg;
10601
10602 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10603 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10604 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10605 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10606}
10607
10608/* set the given per-VL shared limit */
10609static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10610{
10611 u64 reg;
10612 u32 addr;
10613
10614 if (vl < TXE_NUM_DATA_VL)
10615 addr = SEND_CM_CREDIT_VL + (8 * vl);
10616 else
10617 addr = SEND_CM_CREDIT_VL15;
10618
10619 reg = read_csr(dd, addr);
10620 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10621 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10622 write_csr(dd, addr, reg);
10623}
10624
10625/* set the given per-VL dedicated limit */
10626static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10627{
10628 u64 reg;
10629 u32 addr;
10630
10631 if (vl < TXE_NUM_DATA_VL)
10632 addr = SEND_CM_CREDIT_VL + (8 * vl);
10633 else
10634 addr = SEND_CM_CREDIT_VL15;
10635
10636 reg = read_csr(dd, addr);
10637 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10638 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10639 write_csr(dd, addr, reg);
10640}
10641
10642/* spin until the given per-VL status mask bits clear */
10643static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10644 const char *which)
10645{
10646 unsigned long timeout;
10647 u64 reg;
10648
10649 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10650 while (1) {
10651 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10652
10653 if (reg == 0)
10654 return; /* success */
10655 if (time_after(jiffies, timeout))
10656 break; /* timed out */
10657 udelay(1);
10658 }
10659
10660 dd_dev_err(dd,
10661 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10662 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10663 /*
10664 * If this occurs, it is likely there was a credit loss on the link.
10665 * The only recovery from that is a link bounce.
10666 */
10667 dd_dev_err(dd,
10668 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10669}
10670
10671/*
10672 * The number of credits on the VLs may be changed while everything
10673 * is "live", but the following algorithm must be followed due to
10674 * how the hardware is actually implemented. In particular,
10675 * Return_Credit_Status[] is the only correct status check.
10676 *
10677 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10678 * set Global_Shared_Credit_Limit = 0
10679 * use_all_vl = 1
10680 * mask0 = all VLs that are changing either dedicated or shared limits
10681 * set Shared_Limit[mask0] = 0
10682 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10683 * if (changing any dedicated limit)
10684 * mask1 = all VLs that are lowering dedicated limits
10685 * lower Dedicated_Limit[mask1]
10686 * spin until Return_Credit_Status[mask1] == 0
10687 * raise Dedicated_Limits
10688 * raise Shared_Limits
10689 * raise Global_Shared_Credit_Limit
10690 *
10691 * lower = if the new limit is lower, set the limit to the new value
10692 * raise = if the new limit is higher than the current value (may be changed
10693 * earlier in the algorithm), set the new limit to the new value
10694 */
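/*
 * Worked scenario for the algorithm above (all numbers illustrative):
 * lowering VL0's dedicated limit from 100 to 50 while raising VL1's
 * shared limit from 10 to 20 proceeds roughly as
 *
 *	Global_Shared_Credit_Limit = 0        (a shared limit is changing)
 *	Shared_Limit[VL0] = Shared_Limit[VL1] = 0
 *	spin until Return_Credit_Status shows no credits outstanding
 *	Dedicated_Limit[VL0] = 50             (the only lowering VL)
 *	spin until Return_Credit_Status[VL0] == 0
 *	raise dedicated limits (none go up here), restore VL0's unchanged
 *	shared limit, Shared_Limit[VL1] = 20, then raise
 *	Global_Shared_Credit_Limit back to its final value
 */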
10695static int set_buffer_control(struct hfi1_devdata *dd,
10696 struct buffer_control *new_bc)
10697{
10698 u64 changing_mask, ld_mask, stat_mask;
10699 int change_count;
10700 int i, use_all_mask;
10701 int this_shared_changing;
10702 /*
10703 * A0: add the variable any_shared_limit_changing below and in the
10704 * algorithm above. If removing A0 support, it can be removed.
10705 */
10706 int any_shared_limit_changing;
10707 struct buffer_control cur_bc;
10708 u8 changing[OPA_MAX_VLS];
10709 u8 lowering_dedicated[OPA_MAX_VLS];
10710 u16 cur_total;
10711 u32 new_total = 0;
10712 const u64 all_mask =
10713 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10714 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10715 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10716 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10717 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10718 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10719 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10720 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10721 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10722
10723#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10724#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10725
10726
10727 /* find the new total credits, do sanity check on unused VLs */
10728 for (i = 0; i < OPA_MAX_VLS; i++) {
10729 if (valid_vl(i)) {
10730 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10731 continue;
10732 }
10733 nonzero_msg(dd, i, "dedicated",
10734 be16_to_cpu(new_bc->vl[i].dedicated));
10735 nonzero_msg(dd, i, "shared",
10736 be16_to_cpu(new_bc->vl[i].shared));
10737 new_bc->vl[i].dedicated = 0;
10738 new_bc->vl[i].shared = 0;
10739 }
10740 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010741
Mike Marciniszyn77241052015-07-30 15:17:43 -040010742 /* fetch the current values */
10743 get_buffer_control(dd, &cur_bc, &cur_total);
10744
10745 /*
10746 * Create the masks we will use.
10747 */
10748 memset(changing, 0, sizeof(changing));
10749 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10750 /* NOTE: Assumes that the individual VL bits are adjacent and in
10751 increasing order */
10752 stat_mask =
10753 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10754 changing_mask = 0;
10755 ld_mask = 0;
10756 change_count = 0;
10757 any_shared_limit_changing = 0;
10758 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10759 if (!valid_vl(i))
10760 continue;
10761 this_shared_changing = new_bc->vl[i].shared
10762 != cur_bc.vl[i].shared;
10763 if (this_shared_changing)
10764 any_shared_limit_changing = 1;
10765 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10766 || this_shared_changing) {
10767 changing[i] = 1;
10768 changing_mask |= stat_mask;
10769 change_count++;
10770 }
10771 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10772 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10773 lowering_dedicated[i] = 1;
10774 ld_mask |= stat_mask;
10775 }
10776 }
10777
10778 /* bracket the credit change with a total adjustment */
10779 if (new_total > cur_total)
10780 set_global_limit(dd, new_total);
10781
10782 /*
10783 * Start the credit change algorithm.
10784 */
10785 use_all_mask = 0;
10786 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010787 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10788 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010789 set_global_shared(dd, 0);
10790 cur_bc.overall_shared_limit = 0;
10791 use_all_mask = 1;
10792 }
10793
10794 for (i = 0; i < NUM_USABLE_VLS; i++) {
10795 if (!valid_vl(i))
10796 continue;
10797
10798 if (changing[i]) {
10799 set_vl_shared(dd, i, 0);
10800 cur_bc.vl[i].shared = 0;
10801 }
10802 }
10803
10804 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10805 "shared");
10806
10807 if (change_count > 0) {
10808 for (i = 0; i < NUM_USABLE_VLS; i++) {
10809 if (!valid_vl(i))
10810 continue;
10811
10812 if (lowering_dedicated[i]) {
10813 set_vl_dedicated(dd, i,
10814 be16_to_cpu(new_bc->vl[i].dedicated));
10815 cur_bc.vl[i].dedicated =
10816 new_bc->vl[i].dedicated;
10817 }
10818 }
10819
10820 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10821
10822 /* now raise all dedicated that are going up */
10823 for (i = 0; i < NUM_USABLE_VLS; i++) {
10824 if (!valid_vl(i))
10825 continue;
10826
10827 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10828 be16_to_cpu(cur_bc.vl[i].dedicated))
10829 set_vl_dedicated(dd, i,
10830 be16_to_cpu(new_bc->vl[i].dedicated));
10831 }
10832 }
10833
10834 /* next raise all shared that are going up */
10835 for (i = 0; i < NUM_USABLE_VLS; i++) {
10836 if (!valid_vl(i))
10837 continue;
10838
10839 if (be16_to_cpu(new_bc->vl[i].shared) >
10840 be16_to_cpu(cur_bc.vl[i].shared))
10841 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10842 }
10843
10844 /* finally raise the global shared */
10845 if (be16_to_cpu(new_bc->overall_shared_limit) >
10846 be16_to_cpu(cur_bc.overall_shared_limit))
10847 set_global_shared(dd,
10848 be16_to_cpu(new_bc->overall_shared_limit));
10849
10850 /* bracket the credit change with a total adjustment */
10851 if (new_total < cur_total)
10852 set_global_limit(dd, new_total);
10853 return 0;
10854}
10855
10856/*
10857 * Read the given fabric manager table. Return the size of the
10858 * table (in bytes) on success, and a negative error code on
10859 * failure.
10860 */
10861int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10862
10863{
10864 int size;
10865 struct vl_arb_cache *vlc;
10866
10867 switch (which) {
10868 case FM_TBL_VL_HIGH_ARB:
10869 size = 256;
10870 /*
10871 * OPA specifies 128 elements (of 2 bytes each), though
10872 * HFI supports only 16 elements in h/w.
10873 */
10874 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10875 vl_arb_get_cache(vlc, t);
10876 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10877 break;
10878 case FM_TBL_VL_LOW_ARB:
10879 size = 256;
10880 /*
10881 * OPA specifies 128 elements (of 2 bytes each), though
10882 * HFI supports only 16 elements in h/w.
10883 */
10884 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10885 vl_arb_get_cache(vlc, t);
10886 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10887 break;
10888 case FM_TBL_BUFFER_CONTROL:
10889 size = get_buffer_control(ppd->dd, t, NULL);
10890 break;
10891 case FM_TBL_SC2VLNT:
10892 size = get_sc2vlnt(ppd->dd, t);
10893 break;
10894 case FM_TBL_VL_PREEMPT_ELEMS:
10895 size = 256;
10896 /* OPA specifies 128 elements, of 2 bytes each */
10897 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10898 break;
10899 case FM_TBL_VL_PREEMPT_MATRIX:
10900 size = 256;
10901 /*
10902 * OPA specifies that this is the same size as the VL
10903 * arbitration tables (i.e., 256 bytes).
10904 */
10905 break;
10906 default:
10907 return -EINVAL;
10908 }
10909 return size;
10910}
10911
10912/*
10913 * Write the given fabric manager table.
10914 */
10915int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10916{
10917 int ret = 0;
10918 struct vl_arb_cache *vlc;
10919
10920 switch (which) {
10921 case FM_TBL_VL_HIGH_ARB:
10922 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10923 if (vl_arb_match_cache(vlc, t)) {
10924 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10925 break;
10926 }
10927 vl_arb_set_cache(vlc, t);
10928 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10929 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10930 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10931 break;
10932 case FM_TBL_VL_LOW_ARB:
10933 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10934 if (vl_arb_match_cache(vlc, t)) {
10935 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10936 break;
10937 }
10938 vl_arb_set_cache(vlc, t);
10939 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10940 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10941 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10942 break;
10943 case FM_TBL_BUFFER_CONTROL:
10944 ret = set_buffer_control(ppd->dd, t);
10945 break;
10946 case FM_TBL_SC2VLNT:
10947 set_sc2vlnt(ppd->dd, t);
10948 break;
10949 default:
10950 ret = -EINVAL;
10951 }
10952 return ret;
10953}
10954
10955/*
10956 * Disable all data VLs.
10957 *
10958 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10959 */
10960static int disable_data_vls(struct hfi1_devdata *dd)
10961{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010962 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010963 return 1;
10964
10965 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10966
10967 return 0;
10968}
10969
10970/*
10971 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10972 * Just re-enables all data VLs (the "fill" part happens
10973 * automatically - the name was chosen for symmetry with
10974 * stop_drain_data_vls()).
10975 *
10976 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10977 */
10978int open_fill_data_vls(struct hfi1_devdata *dd)
10979{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010980 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010981 return 1;
10982
10983 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10984
10985 return 0;
10986}
10987
10988/*
10989 * drain_data_vls() - assumes that disable_data_vls() has been called,
10990 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
10991 * engines to drop to 0.
10992 */
10993static void drain_data_vls(struct hfi1_devdata *dd)
10994{
10995 sc_wait(dd);
10996 sdma_wait(dd);
10997 pause_for_credit_return(dd);
10998}
10999
11000/*
11001 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11002 *
11003 * Use open_fill_data_vls() to resume using data VLs. This pair is
11004 * meant to be used like this:
11005 *
11006 * stop_drain_data_vls(dd);
11007 * // do things with per-VL resources
11008 * open_fill_data_vls(dd);
11009 */
11010int stop_drain_data_vls(struct hfi1_devdata *dd)
11011{
11012 int ret;
11013
11014 ret = disable_data_vls(dd);
11015 if (ret == 0)
11016 drain_data_vls(dd);
11017
11018 return ret;
11019}
11020
11021/*
11022 * Convert a nanosecond time to a cclock count. No matter how slow
11023 * the cclock, a non-zero ns will always have a non-zero result.
11024 */
11025u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11026{
11027 u32 cclocks;
11028
11029 if (dd->icode == ICODE_FPGA_EMULATION)
11030 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11031 else /* simulation pretends to be ASIC */
11032 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11033 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11034 cclocks = 1;
11035 return cclocks;
11036}
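/*
 * Worked conversion (purely illustrative -- the clock periods below are
 * assumed examples, not the real ASIC_CCLOCK_PS value): with a 1000 ps
 * cclock, 2500 ns converts as (2500 * 1000) / 1000 = 2500 cclocks; with
 * a hypothetical 2000 ps period, 1 ns would compute to 0 and be clamped
 * to 1 cclock by the final check above.
 */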
11037
11038/*
11039 * Convert a cclock count to nanoseconds. No matter how slow
11040 * the cclock, a non-zero cclock count will always have a non-zero result.
11041 */
11042u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11043{
11044 u32 ns;
11045
11046 if (dd->icode == ICODE_FPGA_EMULATION)
11047 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11048 else /* simulation pretends to be ASIC */
11049 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11050 if (cclocks && !ns)
11051 ns = 1;
11052 return ns;
11053}
11054
11055/*
11056 * Dynamically adjust the receive interrupt timeout for a context based on
11057 * incoming packet rate.
11058 *
11059 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11060 */
11061static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11062{
11063 struct hfi1_devdata *dd = rcd->dd;
11064 u32 timeout = rcd->rcvavail_timeout;
11065
11066 /*
11067 * This algorithm doubles or halves the timeout depending on whether
11068 * the number of packets received in this interrupt was less than, or
11069 * greater than or equal to, the interrupt count.
11070 *
11071 * The calculations below do not allow a steady state to be achieved.
11072 * Only at the endpoints is it possible to have an unchanging
11073 * timeout.
11074 */
11075 if (npkts < rcv_intr_count) {
11076 /*
11077 * Not enough packets arrived before the timeout, adjust
11078 * timeout downward.
11079 */
11080 if (timeout < 2) /* already at minimum? */
11081 return;
11082 timeout >>= 1;
11083 } else {
11084 /*
11085 * More than enough packets arrived before the timeout, adjust
11086 * timeout upward.
11087 */
11088 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11089 return;
11090 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11091 }
11092
11093 rcd->rcvavail_timeout = timeout;
11094 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
11095 been verified to be in range */
11096 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11097 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11098}
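/*
 * Illustrative trace of the doubling/halving above (numbers assumed):
 * starting from rcvavail_timeout = 16 with persistently slow traffic
 * (npkts < rcv_intr_count on every interrupt), the timeout steps
 * 16 -> 8 -> 4 -> 2 -> 1 and then holds, since values below 2 are never
 * halved; a burst of fast traffic doubles it back up, capped at
 * dd->rcv_intr_timeout_csr.
 */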
11099
11100void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11101 u32 intr_adjust, u32 npkts)
11102{
11103 struct hfi1_devdata *dd = rcd->dd;
11104 u64 reg;
11105 u32 ctxt = rcd->ctxt;
11106
11107 /*
11108 * Need to write timeout register before updating RcvHdrHead to ensure
11109 * that a new value is used when the HW decides to restart counting.
11110 */
11111 if (intr_adjust)
11112 adjust_rcv_timeout(rcd, npkts);
11113 if (updegr) {
11114 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11115 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11116 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11117 }
11118 mmiowb();
11119 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11120 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11121 << RCV_HDR_HEAD_HEAD_SHIFT);
11122 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11123 mmiowb();
11124}
11125
11126u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11127{
11128 u32 head, tail;
11129
11130 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11131 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11132
11133 if (rcd->rcvhdrtail_kvaddr)
11134 tail = get_rcvhdrtail(rcd);
11135 else
11136 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11137
11138 return head == tail;
11139}
11140
11141/*
11142 * Context Control and Receive Array encoding for buffer size:
11143 * 0x0 invalid
11144 * 0x1 4 KB
11145 * 0x2 8 KB
11146 * 0x3 16 KB
11147 * 0x4 32 KB
11148 * 0x5 64 KB
11149 * 0x6 128 KB
11150 * 0x7 256 KB
11151 * 0x8 512 KB (Receive Array only)
11152 * 0x9 1 MB (Receive Array only)
11153 * 0xa 2 MB (Receive Array only)
11154 *
11155 * 0xB-0xF - reserved (Receive Array only)
11156 *
11157 *
11158 * This routine assumes that the value has already been sanity checked.
11159 */
11160static u32 encoded_size(u32 size)
11161{
11162 switch (size) {
11163 case 4*1024: return 0x1;
11164 case 8*1024: return 0x2;
11165 case 16*1024: return 0x3;
11166 case 32*1024: return 0x4;
11167 case 64*1024: return 0x5;
11168 case 128*1024: return 0x6;
11169 case 256*1024: return 0x7;
11170 case 512*1024: return 0x8;
11171 case 1*1024*1024: return 0x9;
11172 case 2*1024*1024: return 0xa;
11173 }
11174 return 0x1; /* if invalid, go with the minimum size */
11175}
11176
11177void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11178{
11179 struct hfi1_ctxtdata *rcd;
11180 u64 rcvctrl, reg;
11181 int did_enable = 0;
11182
11183 rcd = dd->rcd[ctxt];
11184 if (!rcd)
11185 return;
11186
11187 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11188
11189 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11190 /* if the context already enabled, don't do the extra steps */
11191 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11192 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11193 /* reset the tail and hdr addresses, and sequence count */
11194 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11195 rcd->rcvhdrq_phys);
11196 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11197 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11198 rcd->rcvhdrqtailaddr_phys);
11199 rcd->seq_cnt = 1;
11200
11201 /* reset the cached receive header queue head value */
11202 rcd->head = 0;
11203
11204 /*
11205 * Zero the receive header queue so we don't get false
11206 * positives when checking the sequence number. The
11207 * sequence numbers could land exactly on the same spot.
11208 * E.g. a rcd restart before the receive header wrapped.
11209 */
11210 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11211
11212 /* starting timeout */
11213 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11214
11215 /* enable the context */
11216 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11217
11218 /* clean the egr buffer size first */
11219 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11220 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11221 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11222 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11223
11224 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11225 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11226 did_enable = 1;
11227
11228 /* zero RcvEgrIndexHead */
11229 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11230
11231 /* set eager count and base index */
11232 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11233 & RCV_EGR_CTRL_EGR_CNT_MASK)
11234 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11235 (((rcd->eager_base >> RCV_SHIFT)
11236 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11237 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11238 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11239
11240 /*
11241 * Set TID (expected) count and base index.
11242 * rcd->expected_count is set to individual RcvArray entries,
11243 * not pairs, and the CSR takes a pair-count in groups of
11244 * four, so divide by 8.
11245 */
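		/*
		 * Worked arithmetic (assumed count): 2048 individual RcvArray
		 * entries -> 2048 / 8 = 256 written to TID_PAIR_CNT, i.e.
		 * groups of four pairs.
		 */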
11246 reg = (((rcd->expected_count >> RCV_SHIFT)
11247 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11248 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11249 (((rcd->expected_base >> RCV_SHIFT)
11250 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11251 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11252 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011253 if (ctxt == HFI1_CTRL_CTXT)
11254 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011255 }
11256 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11257 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011258 /*
11259 * When receive context is being disabled turn on tail
11260 * update with a dummy tail address and then disable
11261 * receive context.
11262 */
11263 if (dd->rcvhdrtail_dummy_physaddr) {
11264 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11265 dd->rcvhdrtail_dummy_physaddr);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011266 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011267 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11268 }
11269
Mike Marciniszyn77241052015-07-30 15:17:43 -040011270 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11271 }
11272 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11273 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11274 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11275 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11276 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11277 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011278 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11279 /* See comment on RcvCtxtCtrl.TailUpd above */
11280 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11281 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11282 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011283 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11284 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11285 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11286 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11287 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11288 /* In one-packet-per-eager mode, the size comes from
11289 the RcvArray entry. */
11290 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11291 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11292 }
11293 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11294 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11295 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11296 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11297 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11298 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11299 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11300 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11301 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11302 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11303 rcd->rcvctrl = rcvctrl;
11304 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11305 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11306
11307 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11308 if (did_enable
11309 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11310 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11311 if (reg != 0) {
11312 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11313 ctxt, reg);
11314 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11315 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11316 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11317 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11318 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11319 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11320 ctxt, reg, reg == 0 ? "not" : "still");
11321 }
11322 }
11323
11324 if (did_enable) {
11325 /*
11326 * The interrupt timeout and count must be set after
11327 * the context is enabled to take effect.
11328 */
11329 /* set interrupt timeout */
11330 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11331 (u64)rcd->rcvavail_timeout <<
11332 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11333
11334 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11335 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11336 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11337 }
11338
11339 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11340 /*
11341 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011342 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11343 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011344 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011345 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11346 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011347}
11348
11349u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11350 u64 **cntrp)
11351{
11352 int ret;
11353 u64 val = 0;
11354
11355 if (namep) {
11356 ret = dd->cntrnameslen;
11357 if (pos != 0) {
11358 dd_dev_err(dd, "read_cntrs does not support indexing");
11359 return 0;
11360 }
11361 *namep = dd->cntrnames;
11362 } else {
11363 const struct cntr_entry *entry;
11364 int i, j;
11365
11366 ret = (dd->ndevcntrs) * sizeof(u64);
11367 if (pos != 0) {
11368 dd_dev_err(dd, "read_cntrs does not support indexing");
11369 return 0;
11370 }
11371
11372 /* Get the start of the block of counters */
11373 *cntrp = dd->cntrs;
11374
11375 /*
11376 * Now go and fill in each counter in the block.
11377 */
11378 for (i = 0; i < DEV_CNTR_LAST; i++) {
11379 entry = &dev_cntrs[i];
11380 hfi1_cdbg(CNTR, "reading %s", entry->name);
11381 if (entry->flags & CNTR_DISABLED) {
11382 /* Nothing */
11383 hfi1_cdbg(CNTR, "\tDisabled\n");
11384 } else {
11385 if (entry->flags & CNTR_VL) {
11386 hfi1_cdbg(CNTR, "\tPer VL\n");
11387 for (j = 0; j < C_VL_COUNT; j++) {
11388 val = entry->rw_cntr(entry,
11389 dd, j,
11390 CNTR_MODE_R,
11391 0);
11392 hfi1_cdbg(
11393 CNTR,
11394 "\t\tRead 0x%llx for %d\n",
11395 val, j);
11396 dd->cntrs[entry->offset + j] =
11397 val;
11398 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011399 } else if (entry->flags & CNTR_SDMA) {
11400 hfi1_cdbg(CNTR,
11401 "\t Per SDMA Engine\n");
11402 for (j = 0; j < dd->chip_sdma_engines;
11403 j++) {
11404 val =
11405 entry->rw_cntr(entry, dd, j,
11406 CNTR_MODE_R, 0);
11407 hfi1_cdbg(CNTR,
11408 "\t\tRead 0x%llx for %d\n",
11409 val, j);
11410 dd->cntrs[entry->offset + j] =
11411 val;
11412 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011413 } else {
11414 val = entry->rw_cntr(entry, dd,
11415 CNTR_INVALID_VL,
11416 CNTR_MODE_R, 0);
11417 dd->cntrs[entry->offset] = val;
11418 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11419 }
11420 }
11421 }
11422 }
11423 return ret;
11424}
11425
11426/*
11427 * Used by sysfs to create files for hfi stats to read
11428 */
11429u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11430 char **namep, u64 **cntrp)
11431{
11432 int ret;
11433 u64 val = 0;
11434
11435 if (namep) {
11436 ret = dd->portcntrnameslen;
11437 if (pos != 0) {
11438 dd_dev_err(dd, "index not supported");
11439 return 0;
11440 }
11441 *namep = dd->portcntrnames;
11442 } else {
11443 const struct cntr_entry *entry;
11444 struct hfi1_pportdata *ppd;
11445 int i, j;
11446
11447 ret = (dd->nportcntrs) * sizeof(u64);
11448 if (pos != 0) {
11449 dd_dev_err(dd, "indexing not supported");
11450 return 0;
11451 }
11452 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11453 *cntrp = ppd->cntrs;
11454
11455 for (i = 0; i < PORT_CNTR_LAST; i++) {
11456 entry = &port_cntrs[i];
11457 hfi1_cdbg(CNTR, "reading %s", entry->name);
11458 if (entry->flags & CNTR_DISABLED) {
11459 /* Nothing */
11460 hfi1_cdbg(CNTR, "\tDisabled\n");
11461 continue;
11462 }
11463
11464 if (entry->flags & CNTR_VL) {
11465 hfi1_cdbg(CNTR, "\tPer VL");
11466 for (j = 0; j < C_VL_COUNT; j++) {
11467 val = entry->rw_cntr(entry, ppd, j,
11468 CNTR_MODE_R,
11469 0);
11470 hfi1_cdbg(
11471 CNTR,
11472 "\t\tRead 0x%llx for %d",
11473 val, j);
11474 ppd->cntrs[entry->offset + j] = val;
11475 }
11476 } else {
11477 val = entry->rw_cntr(entry, ppd,
11478 CNTR_INVALID_VL,
11479 CNTR_MODE_R,
11480 0);
11481 ppd->cntrs[entry->offset] = val;
11482 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11483 }
11484 }
11485 }
11486 return ret;
11487}
11488
11489static void free_cntrs(struct hfi1_devdata *dd)
11490{
11491 struct hfi1_pportdata *ppd;
11492 int i;
11493
11494 if (dd->synth_stats_timer.data)
11495 del_timer_sync(&dd->synth_stats_timer);
11496 dd->synth_stats_timer.data = 0;
11497 ppd = (struct hfi1_pportdata *)(dd + 1);
11498 for (i = 0; i < dd->num_pports; i++, ppd++) {
11499 kfree(ppd->cntrs);
11500 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011501 free_percpu(ppd->ibport_data.rvp.rc_acks);
11502 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11503 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011504 ppd->cntrs = NULL;
11505 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011506 ppd->ibport_data.rvp.rc_acks = NULL;
11507 ppd->ibport_data.rvp.rc_qacks = NULL;
11508 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011509 }
11510 kfree(dd->portcntrnames);
11511 dd->portcntrnames = NULL;
11512 kfree(dd->cntrs);
11513 dd->cntrs = NULL;
11514 kfree(dd->scntrs);
11515 dd->scntrs = NULL;
11516 kfree(dd->cntrnames);
11517 dd->cntrnames = NULL;
11518}
11519
11520#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11521#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11522
11523static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11524 u64 *psval, void *context, int vl)
11525{
11526 u64 val;
11527 u64 sval = *psval;
11528
11529 if (entry->flags & CNTR_DISABLED) {
11530 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11531 return 0;
11532 }
11533
11534 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11535
11536 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11537
11538 /* If it's a synthetic counter there is more work we need to do */
11539 if (entry->flags & CNTR_SYNTH) {
11540 if (sval == CNTR_MAX) {
11541 /* No need to read already saturated */
11542 return CNTR_MAX;
11543 }
11544
11545 if (entry->flags & CNTR_32BIT) {
11546 /* 32bit counters can wrap multiple times */
11547 u64 upper = sval >> 32;
11548 u64 lower = (sval << 32) >> 32;
11549
11550 if (lower > val) { /* hw wrapped */
11551 if (upper == CNTR_32BIT_MAX)
11552 val = CNTR_MAX;
11553 else
11554 upper++;
11555 }
11556
11557 if (val != CNTR_MAX)
11558 val = (upper << 32) | val;
11559
11560 } else {
11561 /* If we rolled we are saturated */
11562 if ((val < sval) || (val > CNTR_MAX))
11563 val = CNTR_MAX;
11564 }
11565 }
11566
11567 *psval = val;
11568
11569 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11570
11571 return val;
11572}
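/*
 * Worked example of the 32-bit synthetic counter extension above
 * (illustrative values only):
 *
 *   sval (64-bit software value) = 0x1FFFFFFF0
 *     upper = 0x1, lower = 0xFFFFFFF0
 *   val (new 32-bit hardware read) = 0x10
 *
 * Since lower (0xFFFFFFF0) > val (0x10) the hardware counter wrapped,
 * so upper becomes 0x2 and the returned value is
 * (0x2 << 32) | 0x10 = 0x200000010.  Had upper already been
 * CNTR_32BIT_MAX, the counter would instead saturate at CNTR_MAX.
 */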
11573
11574static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11575 struct cntr_entry *entry,
11576 u64 *psval, void *context, int vl, u64 data)
11577{
11578 u64 val;
11579
11580 if (entry->flags & CNTR_DISABLED) {
11581 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11582 return 0;
11583 }
11584
11585 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11586
11587 if (entry->flags & CNTR_SYNTH) {
11588 *psval = data;
11589 if (entry->flags & CNTR_32BIT) {
11590 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11591 (data << 32) >> 32);
11592 val = data; /* return the full 64bit value */
11593 } else {
11594 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11595 data);
11596 }
11597 } else {
11598 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11599 }
11600
11601 *psval = val;
11602
11603 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11604
11605 return val;
11606}
11607
11608u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11609{
11610 struct cntr_entry *entry;
11611 u64 *sval;
11612
11613 entry = &dev_cntrs[index];
11614 sval = dd->scntrs + entry->offset;
11615
11616 if (vl != CNTR_INVALID_VL)
11617 sval += vl;
11618
11619 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11620}
11621
11622u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11623{
11624 struct cntr_entry *entry;
11625 u64 *sval;
11626
11627 entry = &dev_cntrs[index];
11628 sval = dd->scntrs + entry->offset;
11629
11630 if (vl != CNTR_INVALID_VL)
11631 sval += vl;
11632
11633 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11634}
11635
11636u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11637{
11638 struct cntr_entry *entry;
11639 u64 *sval;
11640
11641 entry = &port_cntrs[index];
11642 sval = ppd->scntrs + entry->offset;
11643
11644 if (vl != CNTR_INVALID_VL)
11645 sval += vl;
11646
11647 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11648 (index <= C_RCV_HDR_OVF_LAST)) {
11649 /* We do not want to bother for disabled contexts */
11650 return 0;
11651 }
11652
11653 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11654}
11655
11656u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11657{
11658 struct cntr_entry *entry;
11659 u64 *sval;
11660
11661 entry = &port_cntrs[index];
11662 sval = ppd->scntrs + entry->offset;
11663
11664 if (vl != CNTR_INVALID_VL)
11665 sval += vl;
11666
11667 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11668 (index <= C_RCV_HDR_OVF_LAST)) {
11669 /* We do not want to bother for disabled contexts */
11670 return 0;
11671 }
11672
11673 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11674}
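/*
 * Note on the C_RCV_HDR_OVF_* guard in read_port_cntr() and
 * write_port_cntr(): the per-context overflow counters occupy the
 * index range C_RCV_HDR_OVF_FIRST .. C_RCV_HDR_OVF_LAST, one entry per
 * receive context.  Entries past dd->num_rcv_contexts were marked
 * CNTR_DISABLED in init_cntrs(), so accesses to those indices simply
 * return 0.  For example (illustrative count), with 16 receive
 * contexts in use, index C_RCV_HDR_OVF_FIRST + 20 refers to a context
 * that does not exist and is skipped.
 */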
11675
11676static void update_synth_timer(unsigned long opaque)
11677{
11678 u64 cur_tx;
11679 u64 cur_rx;
11680 u64 total_flits;
11681 u8 update = 0;
11682 int i, j, vl;
11683 struct hfi1_pportdata *ppd;
11684 struct cntr_entry *entry;
11685
11686 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11687
11688 /*
11689 * Rather than keep beating on the CSRs, pick a minimal set that we can
11690 * check to watch for potential rollover. We can do this by looking at
11691 * the number of flits sent/received. If the total flits exceed 32 bits then
11692 * we have to iterate over all the counters and update them.
11693 */
11694 entry = &dev_cntrs[C_DC_RCV_FLITS];
11695 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11696
11697 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11698 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11699
11700 hfi1_cdbg(
11701 CNTR,
11702 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11703 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11704
11705 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11706 /*
11707 * May not be strictly necessary to update but it won't hurt and
11708 * simplifies the logic here.
11709 */
11710 update = 1;
11711 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11712 dd->unit);
11713 } else {
11714 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11715 hfi1_cdbg(CNTR,
11716 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11717 total_flits, (u64)CNTR_32BIT_MAX);
11718 if (total_flits >= CNTR_32BIT_MAX) {
11719 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11720 dd->unit);
11721 update = 1;
11722 }
11723 }
11724
11725 if (update) {
11726 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11727 for (i = 0; i < DEV_CNTR_LAST; i++) {
11728 entry = &dev_cntrs[i];
11729 if (entry->flags & CNTR_VL) {
11730 for (vl = 0; vl < C_VL_COUNT; vl++)
11731 read_dev_cntr(dd, i, vl);
11732 } else {
11733 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11734 }
11735 }
11736 ppd = (struct hfi1_pportdata *)(dd + 1);
11737 for (i = 0; i < dd->num_pports; i++, ppd++) {
11738 for (j = 0; j < PORT_CNTR_LAST; j++) {
11739 entry = &port_cntrs[j];
11740 if (entry->flags & CNTR_VL) {
11741 for (vl = 0; vl < C_VL_COUNT; vl++)
11742 read_port_cntr(ppd, j, vl);
11743 } else {
11744 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11745 }
11746 }
11747 }
11748
11749 /*
11750 * We want the value in the register. The goal is to keep track
11751 * of the number of "ticks" not the counter value. In other
11752 * words if the register rolls we want to notice it and go ahead
11753 * and force an update.
11754 */
11755 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11756 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11757 CNTR_MODE_R, 0);
11758
11759 entry = &dev_cntrs[C_DC_RCV_FLITS];
11760 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11761 CNTR_MODE_R, 0);
11762
11763 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11764 dd->unit, dd->last_tx, dd->last_rx);
11765
11766 } else {
11767 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11768 }
11769
11770 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11771}
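/*
 * Example of the tripwire logic above (illustrative numbers): if the
 * last sample was last_tx = 0x10000 and last_rx = 0x20000, and the
 * current reads are cur_tx = 0x80000000 and cur_rx = 0x90000000, the
 * combined delta is 0x7FFF0000 + 0x8FFE0000 = 0x10FFD0000 >= 2^32, so
 * every device and port counter is re-read before any 32-bit synthetic
 * counter can wrap a second time undetected.  A backwards-moving flit
 * count (a rolled register) forces the same full update.
 */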
11772
11773 #define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11774static int init_cntrs(struct hfi1_devdata *dd)
11775{
Dean Luickc024c552016-01-11 18:30:57 -050011776 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011777 size_t sz;
11778 char *p;
11779 char name[C_MAX_NAME];
11780 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011781 const char *bit_type_32 = ",32";
11782 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011783
11784 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011785 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11786 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011787
11788 /***********************/
11789 /* per device counters */
11790 /***********************/
11791
11792 /* size names and determine how many we have */
11793 dd->ndevcntrs = 0;
11794 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011795
11796 for (i = 0; i < DEV_CNTR_LAST; i++) {
11797 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11798 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11799 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11800 continue;
11801 }
11802
11803 if (dev_cntrs[i].flags & CNTR_VL) {
11804 hfi1_dbg_early("\tProcessing VL cntr\n");
Dean Luickc024c552016-01-11 18:30:57 -050011805 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011806 for (j = 0; j < C_VL_COUNT; j++) {
11807 memset(name, '\0', C_MAX_NAME);
11808 snprintf(name, C_MAX_NAME, "%s%d",
11809 dev_cntrs[i].name,
11810 vl_from_idx(j));
11811 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011812 /* Add ",32" for 32-bit counters */
11813 if (dev_cntrs[i].flags & CNTR_32BIT)
11814 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011815 sz++;
11816 hfi1_dbg_early("\t\t%s\n", name);
11817 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011818 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011819 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11820 hfi1_dbg_early(
11821 "\tProcessing per SDE counters chip enginers %u\n",
11822 dd->chip_sdma_engines);
Dean Luickc024c552016-01-11 18:30:57 -050011823 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011824 for (j = 0; j < dd->chip_sdma_engines; j++) {
11825 memset(name, '\0', C_MAX_NAME);
11826 snprintf(name, C_MAX_NAME, "%s%d",
11827 dev_cntrs[i].name, j);
11828 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011829 /* Add ",32" for 32-bit counters */
11830 if (dev_cntrs[i].flags & CNTR_32BIT)
11831 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011832 sz++;
11833 hfi1_dbg_early("\t\t%s\n", name);
11834 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011835 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011836 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011837 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011838 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011839 /* Add ",32" for 32-bit counters */
11840 if (dev_cntrs[i].flags & CNTR_32BIT)
11841 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011842 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011843 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011844 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11845 }
11846 }
11847
11848 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011849 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011850 if (!dd->cntrs)
11851 goto bail;
11852
Dean Luickc024c552016-01-11 18:30:57 -050011853 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011854 if (!dd->scntrs)
11855 goto bail;
11856
11857
11858 /* allocate space for the counter names */
11859 dd->cntrnameslen = sz;
11860 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11861 if (!dd->cntrnames)
11862 goto bail;
11863
11864 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011865 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011866 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11867 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011868 } else if (dev_cntrs[i].flags & CNTR_VL) {
11869 for (j = 0; j < C_VL_COUNT; j++) {
11870 memset(name, '\0', C_MAX_NAME);
11871 snprintf(name, C_MAX_NAME, "%s%d",
11872 dev_cntrs[i].name,
11873 vl_from_idx(j));
11874 memcpy(p, name, strlen(name));
11875 p += strlen(name);
11876
11877 /* Counter is 32 bits */
11878 if (dev_cntrs[i].flags & CNTR_32BIT) {
11879 memcpy(p, bit_type_32, bit_type_32_sz);
11880 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011881 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011882
Mike Marciniszyn77241052015-07-30 15:17:43 -040011883 *p++ = '\n';
11884 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011885 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11886 for (j = 0; j < dd->chip_sdma_engines; j++) {
11887 memset(name, '\0', C_MAX_NAME);
11888 snprintf(name, C_MAX_NAME, "%s%d",
11889 dev_cntrs[i].name, j);
11890 memcpy(p, name, strlen(name));
11891 p += strlen(name);
11892
11893 /* Counter is 32 bits */
11894 if (dev_cntrs[i].flags & CNTR_32BIT) {
11895 memcpy(p, bit_type_32, bit_type_32_sz);
11896 p += bit_type_32_sz;
11897 }
11898
11899 *p++ = '\n';
11900 }
11901 } else {
11902 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11903 p += strlen(dev_cntrs[i].name);
11904
11905 /* Counter is 32 bits */
11906 if (dev_cntrs[i].flags & CNTR_32BIT) {
11907 memcpy(p, bit_type_32, bit_type_32_sz);
11908 p += bit_type_32_sz;
11909 }
11910
11911 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011912 }
11913 }
11914
11915 /*********************/
11916 /* per port counters */
11917 /*********************/
11918
11919 /*
11920 * Go through the counters for the overflows and disable the ones we
11921 * don't need. This varies based on platform so we need to do it
11922 * dynamically here.
11923 */
11924 rcv_ctxts = dd->num_rcv_contexts;
11925 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11926 i <= C_RCV_HDR_OVF_LAST; i++) {
11927 port_cntrs[i].flags |= CNTR_DISABLED;
11928 }
11929
11930 /* size port counter names and determine how many we have */
11931 sz = 0;
11932 dd->nportcntrs = 0;
11933 for (i = 0; i < PORT_CNTR_LAST; i++) {
11934 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11935 if (port_cntrs[i].flags & CNTR_DISABLED) {
11936 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11937 continue;
11938 }
11939
11940 if (port_cntrs[i].flags & CNTR_VL) {
11941 hfi1_dbg_early("\tProcessing VL cntr\n");
11942 port_cntrs[i].offset = dd->nportcntrs;
11943 for (j = 0; j < C_VL_COUNT; j++) {
11944 memset(name, '\0', C_MAX_NAME);
11945 snprintf(name, C_MAX_NAME, "%s%d",
11946 port_cntrs[i].name,
11947 vl_from_idx(j));
11948 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011949 /* Add ",32" for 32-bit counters */
11950 if (port_cntrs[i].flags & CNTR_32BIT)
11951 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011952 sz++;
11953 hfi1_dbg_early("\t\t%s\n", name);
11954 dd->nportcntrs++;
11955 }
11956 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011957 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011958 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011959 /* Add ",32" for 32-bit counters */
11960 if (port_cntrs[i].flags & CNTR_32BIT)
11961 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011962 port_cntrs[i].offset = dd->nportcntrs;
11963 dd->nportcntrs++;
11964 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11965 }
11966 }
11967
11968 /* allocate space for the counter names */
11969 dd->portcntrnameslen = sz;
11970 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11971 if (!dd->portcntrnames)
11972 goto bail;
11973
11974 /* fill in port cntr names */
11975 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11976 if (port_cntrs[i].flags & CNTR_DISABLED)
11977 continue;
11978
11979 if (port_cntrs[i].flags & CNTR_VL) {
11980 for (j = 0; j < C_VL_COUNT; j++) {
11981 memset(name, '\0', C_MAX_NAME);
11982 snprintf(name, C_MAX_NAME, "%s%d",
11983 port_cntrs[i].name,
11984 vl_from_idx(j));
11985 memcpy(p, name, strlen(name));
11986 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011987
11988 /* Counter is 32 bits */
11989 if (port_cntrs[i].flags & CNTR_32BIT) {
11990 memcpy(p, bit_type_32, bit_type_32_sz);
11991 p += bit_type_32_sz;
11992 }
11993
Mike Marciniszyn77241052015-07-30 15:17:43 -040011994 *p++ = '\n';
11995 }
11996 } else {
11997 memcpy(p, port_cntrs[i].name,
11998 strlen(port_cntrs[i].name));
11999 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012000
12001 /* Counter is 32 bits */
12002 if (port_cntrs[i].flags & CNTR_32BIT) {
12003 memcpy(p, bit_type_32, bit_type_32_sz);
12004 p += bit_type_32_sz;
12005 }
12006
Mike Marciniszyn77241052015-07-30 15:17:43 -040012007 *p++ = '\n';
12008 }
12009 }
12010
12011 /* allocate per port storage for counter values */
12012 ppd = (struct hfi1_pportdata *)(dd + 1);
12013 for (i = 0; i < dd->num_pports; i++, ppd++) {
12014 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12015 if (!ppd->cntrs)
12016 goto bail;
12017
12018 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12019 if (!ppd->scntrs)
12020 goto bail;
12021 }
12022
12023 /* CPU counters need to be allocated and zeroed */
12024 if (init_cpu_counters(dd))
12025 goto bail;
12026
12027 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12028 return 0;
12029bail:
12030 free_cntrs(dd);
12031 return -ENOMEM;
12032}
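/*
 * Name-buffer sizing example for init_cntrs() (hypothetical counter
 * name): a per-VL, 32-bit counter named "Foo" contributes, for each VL
 * index, strlen("Foo" plus the VL digits) + strlen(",32") + 1 byte for
 * the trailing '\n'.  For VL0 that is 4 + 3 + 1 = 8 bytes of
 * dd->cntrnames, and dd->ndevcntrs grows by one per VL entry.
 */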
12033
12034
12035static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12036{
12037 switch (chip_lstate) {
12038 default:
12039 dd_dev_err(dd,
12040 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12041 chip_lstate);
12042 /* fall through */
12043 case LSTATE_DOWN:
12044 return IB_PORT_DOWN;
12045 case LSTATE_INIT:
12046 return IB_PORT_INIT;
12047 case LSTATE_ARMED:
12048 return IB_PORT_ARMED;
12049 case LSTATE_ACTIVE:
12050 return IB_PORT_ACTIVE;
12051 }
12052}
12053
12054u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12055{
12056 /* look at the HFI meta-states only */
12057 switch (chip_pstate & 0xf0) {
12058 default:
12059 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12060 chip_pstate);
12061 /* fall through */
12062 case PLS_DISABLED:
12063 return IB_PORTPHYSSTATE_DISABLED;
12064 case PLS_OFFLINE:
12065 return OPA_PORTPHYSSTATE_OFFLINE;
12066 case PLS_POLLING:
12067 return IB_PORTPHYSSTATE_POLLING;
12068 case PLS_CONFIGPHY:
12069 return IB_PORTPHYSSTATE_TRAINING;
12070 case PLS_LINKUP:
12071 return IB_PORTPHYSSTATE_LINKUP;
12072 case PLS_PHYTEST:
12073 return IB_PORTPHYSSTATE_PHY_TEST;
12074 }
12075}
12076
12077/* return the OPA port logical state name */
12078const char *opa_lstate_name(u32 lstate)
12079{
12080 static const char * const port_logical_names[] = {
12081 "PORT_NOP",
12082 "PORT_DOWN",
12083 "PORT_INIT",
12084 "PORT_ARMED",
12085 "PORT_ACTIVE",
12086 "PORT_ACTIVE_DEFER",
12087 };
12088 if (lstate < ARRAY_SIZE(port_logical_names))
12089 return port_logical_names[lstate];
12090 return "unknown";
12091}
12092
12093/* return the OPA port physical state name */
12094const char *opa_pstate_name(u32 pstate)
12095{
12096 static const char * const port_physical_names[] = {
12097 "PHYS_NOP",
12098 "reserved1",
12099 "PHYS_POLL",
12100 "PHYS_DISABLED",
12101 "PHYS_TRAINING",
12102 "PHYS_LINKUP",
12103 "PHYS_LINK_ERR_RECOVER",
12104 "PHYS_PHY_TEST",
12105 "reserved8",
12106 "PHYS_OFFLINE",
12107 "PHYS_GANGED",
12108 "PHYS_TEST",
12109 };
12110 if (pstate < ARRAY_SIZE(port_physical_names))
12111 return port_physical_names[pstate];
12112 return "unknown";
12113}
12114
12115/*
12116 * Read the hardware link state and set the driver's cached value of it.
12117 * Return the (new) current value.
12118 */
12119u32 get_logical_state(struct hfi1_pportdata *ppd)
12120{
12121 u32 new_state;
12122
12123 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12124 if (new_state != ppd->lstate) {
12125 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12126 opa_lstate_name(new_state), new_state);
12127 ppd->lstate = new_state;
12128 }
12129 /*
12130 * Set port status flags in the page mapped into userspace
12131 * memory. Do it here to ensure a reliable state - this is
12132 * the only function called by all state handling code.
12133 * Always set the flags because the cached value
12134 * might have been changed explicitly outside of this
12135 * function.
12136 */
12137 if (ppd->statusp) {
12138 switch (ppd->lstate) {
12139 case IB_PORT_DOWN:
12140 case IB_PORT_INIT:
12141 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12142 HFI1_STATUS_IB_READY);
12143 break;
12144 case IB_PORT_ARMED:
12145 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12146 break;
12147 case IB_PORT_ACTIVE:
12148 *ppd->statusp |= HFI1_STATUS_IB_READY;
12149 break;
12150 }
12151 }
12152 return ppd->lstate;
12153}
12154
12155/**
12156 * wait_logical_linkstate - wait for an IB link state change to occur
12157 * @ppd: port device
12158 * @state: the state to wait for
12159 * @msecs: the number of milliseconds to wait
12160 *
12161 * Wait up to msecs milliseconds for IB link state change to occur.
12162 * For now, take the easy polling route.
12163 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12164 */
12165static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12166 int msecs)
12167{
12168 unsigned long timeout;
12169
12170 timeout = jiffies + msecs_to_jiffies(msecs);
12171 while (1) {
12172 if (get_logical_state(ppd) == state)
12173 return 0;
12174 if (time_after(jiffies, timeout))
12175 break;
12176 msleep(20);
12177 }
12178 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12179
12180 return -ETIMEDOUT;
12181}
12182
12183u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12184{
12185 static u32 remembered_state = 0xff;
12186 u32 pstate;
12187 u32 ib_pstate;
12188
12189 pstate = read_physical_state(ppd->dd);
12190 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12191 if (remembered_state != ib_pstate) {
12192 dd_dev_info(ppd->dd,
12193 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12194 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12195 pstate);
12196 remembered_state = ib_pstate;
12197 }
12198 return ib_pstate;
12199}
12200
12201/*
12202 * Read/modify/write ASIC_QSFP register bits as selected by mask
12203 * data: 0 or 1 in the positions depending on what needs to be written
12204 * dir: 0 for read, 1 for write
12205 * mask: select by setting
12206 * I2CCLK (bit 0)
12207 * I2CDATA (bit 1)
12208 */
12209u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12210 u32 mask)
12211{
12212 u64 qsfp_oe, target_oe;
12213
12214 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12215 if (mask) {
12216 /* We are writing register bits, so lock access */
12217 dir &= mask;
12218 data &= mask;
12219
12220 qsfp_oe = read_csr(dd, target_oe);
12221 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12222 write_csr(dd, target_oe, qsfp_oe);
12223 }
12224 /* We are exclusively reading bits here, but it is unlikely
12225 * we'll get valid data when we set the direction of the pin
12226 * in the same call, so callers should invoke this function again
12227 * to read valid data
12228 */
12229 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12230}
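/*
 * Usage sketch for hfi1_gpio_mod() (illustrative, raw bit values from
 * the comment above): to drive I2CCLK as an output while leaving
 * I2CDATA released, a caller could pass dir = 0x1 and mask = 0x3; the
 * OE register then has bit 0 set and bit 1 cleared.  Because the pin
 * direction change and the returned ASIC_QSFPn_IN sample come from the
 * same call, a caller normally invokes the function a second time
 * (with mask = 0) to read stable input values.
 */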
12231
12232#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12233(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12234
12235#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12236(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12237
12238int hfi1_init_ctxt(struct send_context *sc)
12239{
12240 if (sc != NULL) {
12241 struct hfi1_devdata *dd = sc->dd;
12242 u64 reg;
12243 u8 set = (sc->type == SC_USER ?
12244 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12245 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12246 reg = read_kctxt_csr(dd, sc->hw_context,
12247 SEND_CTXT_CHECK_ENABLE);
12248 if (set)
12249 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12250 else
12251 SET_STATIC_RATE_CONTROL_SMASK(reg);
12252 write_kctxt_csr(dd, sc->hw_context,
12253 SEND_CTXT_CHECK_ENABLE, reg);
12254 }
12255 return 0;
12256}
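/*
 * Note on the inverted set/clear in hfi1_init_ctxt(): the CSR bit is a
 * *disallow* bit (DISALLOW_PBC_STATIC_RATE_CONTROL), so enabling the
 * STATIC_RATE_CTRL capability means clearing the bit, and disabling
 * the capability means setting it.  For example, a user send context
 * with HFI1_CAP_IS_USET(STATIC_RATE_CTRL) true ends up with the
 * disallow bit cleared in SEND_CTXT_CHECK_ENABLE.
 */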
12257
12258int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12259{
12260 int ret = 0;
12261 u64 reg;
12262
12263 if (dd->icode != ICODE_RTL_SILICON) {
12264 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12265 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12266 __func__);
12267 return -EINVAL;
12268 }
12269 reg = read_csr(dd, ASIC_STS_THERM);
12270 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12271 ASIC_STS_THERM_CURR_TEMP_MASK);
12272 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12273 ASIC_STS_THERM_LO_TEMP_MASK);
12274 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12275 ASIC_STS_THERM_HI_TEMP_MASK);
12276 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12277 ASIC_STS_THERM_CRIT_TEMP_MASK);
12278 /* triggers is a 3-bit value - 1 bit per trigger. */
12279 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12280
12281 return ret;
12282}
12283
12284/* ========================================================================= */
12285
12286/*
12287 * Enable/disable chip from delivering interrupts.
12288 */
12289void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12290{
12291 int i;
12292
12293 /*
12294 * In HFI, the mask needs to be 1 to allow interrupts.
12295 */
12296 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012297 /* enable all interrupts */
12298 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12299 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12300
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012301 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012302 } else {
12303 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12304 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12305 }
12306}
12307
12308/*
12309 * Clear all interrupt sources on the chip.
12310 */
12311static void clear_all_interrupts(struct hfi1_devdata *dd)
12312{
12313 int i;
12314
12315 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12316 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12317
12318 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12319 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12320 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12321 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12322 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12323 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12324 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12325 for (i = 0; i < dd->chip_send_contexts; i++)
12326 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12327 for (i = 0; i < dd->chip_sdma_engines; i++)
12328 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12329
12330 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12331 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12332 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12333}
12334
12335/* Move to pcie.c? */
12336static void disable_intx(struct pci_dev *pdev)
12337{
12338 pci_intx(pdev, 0);
12339}
12340
12341static void clean_up_interrupts(struct hfi1_devdata *dd)
12342{
12343 int i;
12344
12345 /* remove irqs - must happen before disabling/turning off */
12346 if (dd->num_msix_entries) {
12347 /* MSI-X */
12348 struct hfi1_msix_entry *me = dd->msix_entries;
12349
12350 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12351 if (me->arg == NULL) /* => no irq, no affinity */
12352 break;
12353 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12354 NULL);
12355 free_irq(me->msix.vector, me->arg);
12356 }
12357 } else {
12358 /* INTx */
12359 if (dd->requested_intx_irq) {
12360 free_irq(dd->pcidev->irq, dd);
12361 dd->requested_intx_irq = 0;
12362 }
12363 }
12364
12365 /* turn off interrupts */
12366 if (dd->num_msix_entries) {
12367 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012368 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012369 } else {
12370 /* INTx */
12371 disable_intx(dd->pcidev);
12372 }
12373
12374 /* clean structures */
12375 for (i = 0; i < dd->num_msix_entries; i++)
12376 free_cpumask_var(dd->msix_entries[i].mask);
12377 kfree(dd->msix_entries);
12378 dd->msix_entries = NULL;
12379 dd->num_msix_entries = 0;
12380}
12381
12382/*
12383 * Remap the interrupt source from the general handler to the given MSI-X
12384 * interrupt.
12385 */
12386static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12387{
12388 u64 reg;
12389 int m, n;
12390
12391 /* clear from the handled mask of the general interrupt */
12392 m = isrc / 64;
12393 n = isrc % 64;
12394 dd->gi_mask[m] &= ~((u64)1 << n);
12395
12396 /* direct the chip source to the given MSI-X interrupt */
12397 m = isrc / 8;
12398 n = isrc % 8;
12399 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12400 reg &= ~((u64)0xff << (8*n));
12401 reg |= ((u64)msix_intr & 0xff) << (8*n);
12402 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12403}
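/*
 * Worked example for remap_intr() (illustrative source number): for
 * isrc = 137 and msix_intr = 5, bit 9 of dd->gi_mask[2] is cleared
 * (137 = 2 * 64 + 9), and byte 1 of the CSR at CCE_INT_MAP + 8 * 17
 * (137 = 17 * 8 + 1) is written with 5, steering that chip source to
 * MSI-X vector 5 instead of the general handler.
 */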
12404
12405static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12406 int engine, int msix_intr)
12407{
12408 /*
12409 * SDMA engine interrupt sources grouped by type, rather than
12410 * engine. Per-engine interrupts are as follows:
12411 * SDMA
12412 * SDMAProgress
12413 * SDMAIdle
12414 */
12415 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12416 msix_intr);
12417 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12418 msix_intr);
12419 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12420 msix_intr);
12421}
12422
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423static int request_intx_irq(struct hfi1_devdata *dd)
12424{
12425 int ret;
12426
Jubin John98050712015-11-16 21:59:27 -050012427 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12428 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012429 ret = request_irq(dd->pcidev->irq, general_interrupt,
12430 IRQF_SHARED, dd->intx_name, dd);
12431 if (ret)
12432 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12433 ret);
12434 else
12435 dd->requested_intx_irq = 1;
12436 return ret;
12437}
12438
12439static int request_msix_irqs(struct hfi1_devdata *dd)
12440{
12441 const struct cpumask *local_mask;
12442 cpumask_var_t def, rcv;
12443 bool def_ret, rcv_ret;
12444 int first_general, last_general;
12445 int first_sdma, last_sdma;
12446 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012447 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012448 int rcv_cpu, sdma_cpu;
12449 int i, ret = 0, possible;
12450 int ht;
12451
12452 /* calculate the ranges we are going to use */
12453 first_general = 0;
12454 first_sdma = last_general = first_general + 1;
12455 first_rx = last_sdma = first_sdma + dd->num_sdma;
12456 last_rx = first_rx + dd->n_krcv_queues;
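	/*
	 * Resulting vector layout (illustrative counts): with
	 * dd->num_sdma = 16 and dd->n_krcv_queues = 8, vector 0 is the
	 * general "slow path" interrupt, vectors 1-16 are the SDMA
	 * engines, and vectors 17-24 are the kernel receive contexts
	 * ([first, last) ranges: general [0, 1), sdma [1, 17),
	 * rx [17, 25)).
	 */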
12457
12458 /*
12459 * Interrupt affinity.
12460 *
12461 * non-rcv avail interrupts get a default mask that
12462 * starts as the local CPUs with hyperthread siblings
12463 * removed and each CPU dedicated to a rcv avail interrupt removed.
12464 *
12465 * rcv avail interrupts get node-relative CPUs starting at 1,
12466 * wrapping back to the first rcv CPU as necessary.
12467 *
12468 */
12469 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12470 /* if first cpu is invalid, use NUMA 0 */
12471 if (cpumask_first(local_mask) >= nr_cpu_ids)
12472 local_mask = topology_core_cpumask(0);
12473
12474 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12475 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12476 if (!def_ret || !rcv_ret)
12477 goto bail;
12478 /* use local mask as default */
12479 cpumask_copy(def, local_mask);
12480 possible = cpumask_weight(def);
12481 /* disarm threads from default */
12482 ht = cpumask_weight(
12483 topology_sibling_cpumask(cpumask_first(local_mask)));
12484 for (i = possible/ht; i < possible; i++)
12485 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012486 /* def now has full cores on chosen node*/
12487 first_cpu = cpumask_first(def);
12488 if (nr_cpu_ids >= first_cpu)
12489 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012490 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012491
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012492 /* One context is reserved as control context */
12493 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012494 cpumask_clear_cpu(curr_cpu, def);
12495 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012496 curr_cpu = cpumask_next(curr_cpu, def);
12497 if (curr_cpu >= nr_cpu_ids)
12498 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012499 }
12500 /* def mask has non-rcv, rcv has recv mask */
12501 rcv_cpu = cpumask_first(rcv);
12502 sdma_cpu = cpumask_first(def);
12503
12504 /*
12505 * Sanity check - the code expects all SDMA chip source
12506 * interrupts to be in the same CSR, starting at bit 0. Verify
12507 * that this is true by checking the bit location of the start.
12508 */
12509 BUILD_BUG_ON(IS_SDMA_START % 64);
12510
12511 for (i = 0; i < dd->num_msix_entries; i++) {
12512 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12513 const char *err_info;
12514 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012515 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012516 void *arg;
12517 int idx;
12518 struct hfi1_ctxtdata *rcd = NULL;
12519 struct sdma_engine *sde = NULL;
12520
12521 /* obtain the arguments to request_irq */
12522 if (first_general <= i && i < last_general) {
12523 idx = i - first_general;
12524 handler = general_interrupt;
12525 arg = dd;
12526 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012527 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012528 err_info = "general";
12529 } else if (first_sdma <= i && i < last_sdma) {
12530 idx = i - first_sdma;
12531 sde = &dd->per_sdma[idx];
12532 handler = sdma_interrupt;
12533 arg = sde;
12534 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012535 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012536 err_info = "sdma";
12537 remap_sdma_interrupts(dd, idx, i);
12538 } else if (first_rx <= i && i < last_rx) {
12539 idx = i - first_rx;
12540 rcd = dd->rcd[idx];
12541 /* no interrupt if no rcd */
12542 if (!rcd)
12543 continue;
12544 /*
12545 * Set the interrupt register and mask for this
12546 * context's interrupt.
12547 */
12548 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12549 rcd->imask = ((u64)1) <<
12550 ((IS_RCVAVAIL_START+idx) % 64);
12551 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012552 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012553 arg = rcd;
12554 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012555 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012556 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012557 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012558 } else {
12559 /* not in our expected range - complain, then
12560 ignore it */
12561 dd_dev_err(dd,
12562 "Unexpected extra MSI-X interrupt %d\n", i);
12563 continue;
12564 }
12565 /* no argument, no interrupt */
12566 if (arg == NULL)
12567 continue;
12568 /* make sure the name is terminated */
12569 me->name[sizeof(me->name)-1] = 0;
12570
Dean Luickf4f30031c2015-10-26 10:28:44 -040012571 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12572 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012573 if (ret) {
12574 dd_dev_err(dd,
12575 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12576 err_info, me->msix.vector, idx, ret);
12577 return ret;
12578 }
12579 /*
12580 * assign arg after request_irq call, so it will be
12581 * cleaned up
12582 */
12583 me->arg = arg;
12584
12585 if (!zalloc_cpumask_var(
12586 &dd->msix_entries[i].mask,
12587 GFP_KERNEL))
12588 goto bail;
12589 if (handler == sdma_interrupt) {
12590 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12591 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012592 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012593 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12594 sdma_cpu = cpumask_next(sdma_cpu, def);
12595 if (sdma_cpu >= nr_cpu_ids)
12596 sdma_cpu = cpumask_first(def);
12597 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012598 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12599 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12600 cpumask_first(def) : rcv_cpu);
12601 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12602 /* map to first default */
12603 cpumask_set_cpu(cpumask_first(def),
12604 dd->msix_entries[i].mask);
12605 } else {
12606 cpumask_set_cpu(rcv_cpu,
12607 dd->msix_entries[i].mask);
12608 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12609 if (rcv_cpu >= nr_cpu_ids)
12610 rcv_cpu = cpumask_first(rcv);
12611 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012612 } else {
12613 /* otherwise first def */
12614 dd_dev_info(dd, "%s cpu %d\n",
12615 err_info, cpumask_first(def));
12616 cpumask_set_cpu(
12617 cpumask_first(def), dd->msix_entries[i].mask);
12618 }
12619 irq_set_affinity_hint(
12620 dd->msix_entries[i].msix.vector,
12621 dd->msix_entries[i].mask);
12622 }
12623
12624out:
12625 free_cpumask_var(def);
12626 free_cpumask_var(rcv);
12627 return ret;
12628bail:
12629 ret = -ENOMEM;
12630 goto out;
12631}
12632
12633/*
12634 * Set the general handler to accept all interrupts, remap all
12635 * chip interrupts back to MSI-X 0.
12636 */
12637static void reset_interrupts(struct hfi1_devdata *dd)
12638{
12639 int i;
12640
12641 /* all interrupts handled by the general handler */
12642 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12643 dd->gi_mask[i] = ~(u64)0;
12644
12645 /* all chip interrupts map to MSI-X 0 */
12646 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12647 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12648}
12649
12650static int set_up_interrupts(struct hfi1_devdata *dd)
12651{
12652 struct hfi1_msix_entry *entries;
12653 u32 total, request;
12654 int i, ret;
12655 int single_interrupt = 0; /* we expect to have all the interrupts */
12656
12657 /*
12658 * Interrupt count:
12659 * 1 general, "slow path" interrupt (includes the SDMA engines
12660 * slow source, SDMACleanupDone)
12661 * N interrupts - one per used SDMA engine
12662 * M interrupts - one per kernel receive context
12663 */
12664 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12665
12666 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12667 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012668 ret = -ENOMEM;
12669 goto fail;
12670 }
12671 /* 1-1 MSI-X entry assignment */
12672 for (i = 0; i < total; i++)
12673 entries[i].msix.entry = i;
12674
12675 /* ask for MSI-X interrupts */
12676 request = total;
12677 request_msix(dd, &request, entries);
12678
12679 if (request == 0) {
12680 /* using INTx */
12681 /* dd->num_msix_entries already zero */
12682 kfree(entries);
12683 single_interrupt = 1;
12684 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12685 } else {
12686 /* using MSI-X */
12687 dd->num_msix_entries = request;
12688 dd->msix_entries = entries;
12689
12690 if (request != total) {
12691 /* using MSI-X, with reduced interrupts */
12692 dd_dev_err(
12693 dd,
12694 "cannot handle reduced interrupt case, want %u, got %u\n",
12695 total, request);
12696 ret = -EINVAL;
12697 goto fail;
12698 }
12699 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12700 }
12701
12702 /* mask all interrupts */
12703 set_intr_state(dd, 0);
12704 /* clear all pending interrupts */
12705 clear_all_interrupts(dd);
12706
12707 /* reset general handler mask, chip MSI-X mappings */
12708 reset_interrupts(dd);
12709
12710 if (single_interrupt)
12711 ret = request_intx_irq(dd);
12712 else
12713 ret = request_msix_irqs(dd);
12714 if (ret)
12715 goto fail;
12716
12717 return 0;
12718
12719fail:
12720 clean_up_interrupts(dd);
12721 return ret;
12722}
12723
12724/*
12725 * Set up context values in dd. Sets:
12726 *
12727 * num_rcv_contexts - number of contexts being used
12728 * n_krcv_queues - number of kernel contexts
12729 * first_user_ctxt - first non-kernel context in array of contexts
12730 * freectxts - number of free user contexts
12731 * num_send_contexts - number of PIO send contexts being used
12732 */
12733static int set_up_context_variables(struct hfi1_devdata *dd)
12734{
12735 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012736 int total_contexts;
12737 int ret;
12738 unsigned ngroups;
12739
12740 /*
12741 * Kernel contexts: (to be fixed later):
12742 * - min of 2, or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012743 * - Context 0 - control context (VL15/multicast/error)
12744 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012745 */
12746 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012747 /*
12748 * Don't count context 0 in n_krcvqs since
12749 * it isn't used for normal verbs traffic.
12750 *
12751 * krcvqs will reflect number of kernel
12752 * receive contexts above 0.
12753 */
12754 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012755 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012756 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012757 num_kernel_contexts =
12758 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12759 /*
12760 * Every kernel receive context needs an ACK send context.
12761 * one send context is allocated for each VL{0-7} and VL15
12762 */
12763 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12764 dd_dev_err(dd,
12765 "Reducing # kernel rcv contexts to: %d, from %d\n",
12766 (int)(dd->chip_send_contexts - num_vls - 1),
12767 (int)num_kernel_contexts);
12768 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12769 }
12770 /*
12771 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012772 * - default to 1 user context per CPU if num_user_contexts is
12773 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012774 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012775 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012776 num_user_contexts = num_online_cpus();
12777
12778 total_contexts = num_kernel_contexts + num_user_contexts;
12779
12780 /*
12781 * Adjust the counts given a global max.
12782 */
12783 if (total_contexts > dd->chip_rcv_contexts) {
12784 dd_dev_err(dd,
12785 "Reducing # user receive contexts to: %d, from %d\n",
12786 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12787 (int)num_user_contexts);
12788 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12789 /* recalculate */
12790 total_contexts = num_kernel_contexts + num_user_contexts;
12791 }
12792
12793 /* the first N are kernel contexts, the rest are user contexts */
12794 dd->num_rcv_contexts = total_contexts;
12795 dd->n_krcv_queues = num_kernel_contexts;
12796 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012797 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012798 dd->freectxts = num_user_contexts;
12799 dd_dev_info(dd,
12800 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12801 (int)dd->chip_rcv_contexts,
12802 (int)dd->num_rcv_contexts,
12803 (int)dd->n_krcv_queues,
12804 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12805
12806 /*
12807 * Receive array allocation:
12808 * All RcvArray entries are divided into groups of 8. This
12809 * is required by the hardware and will speed up writes to
12810 * consecutive entries by using write-combining of the entire
12811 * cacheline.
12812 *
12813 * The number of groups is evenly divided among all contexts;
12814 * any leftover groups will be given to the first N user
12815 * contexts.
12816 */
12817 dd->rcv_entries.group_size = RCV_INCREMENT;
12818 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12819 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12820 dd->rcv_entries.nctxt_extra = ngroups -
12821 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12822 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12823 dd->rcv_entries.ngroups,
12824 dd->rcv_entries.nctxt_extra);
12825 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12826 MAX_EAGER_ENTRIES * 2) {
12827 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12828 dd->rcv_entries.group_size;
12829 dd_dev_info(dd,
12830 "RcvArray group count too high, change to %u\n",
12831 dd->rcv_entries.ngroups);
12832 dd->rcv_entries.nctxt_extra = 0;
12833 }
12834 /*
12835 * PIO send contexts
12836 */
12837 ret = init_sc_pools_and_sizes(dd);
12838 if (ret >= 0) { /* success */
12839 dd->num_send_contexts = ret;
12840 dd_dev_info(
12841 dd,
12842 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12843 dd->chip_send_contexts,
12844 dd->num_send_contexts,
12845 dd->sc_sizes[SC_KERNEL].count,
12846 dd->sc_sizes[SC_ACK].count,
12847 dd->sc_sizes[SC_USER].count);
12848 ret = 0; /* success */
12849 }
12850
12851 return ret;
12852}
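/*
 * RcvArray sizing example for set_up_context_variables()
 * (illustrative numbers): with chip_rcv_array_count = 2048 and
 * group_size = 8, ngroups = 256.  For 24 receive contexts each context
 * gets 256 / 24 = 10 groups, and nctxt_extra = 256 - 24 * 10 = 16, so
 * the first 16 user contexts each receive one extra group.
 */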
12853
12854/*
12855 * Set the device/port partition key table. The MAD code
12856 * will ensure that, at least, the partial management
12857 * partition key is present in the table.
12858 */
12859static void set_partition_keys(struct hfi1_pportdata *ppd)
12860{
12861 struct hfi1_devdata *dd = ppd->dd;
12862 u64 reg = 0;
12863 int i;
12864
12865 dd_dev_info(dd, "Setting partition keys\n");
12866 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12867 reg |= (ppd->pkeys[i] &
12868 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12869 ((i % 4) *
12870 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12871 /* Each register holds 4 PKey values. */
12872 if ((i % 4) == 3) {
12873 write_csr(dd, RCV_PARTITION_KEY +
12874 ((i - 3) * 2), reg);
12875 reg = 0;
12876 }
12877 }
12878
12879 /* Always enable HW pkeys check when pkeys table is set */
12880 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12881}
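/*
 * Packing example for set_partition_keys() (illustrative index): each
 * 64-bit RCV_PARTITION_KEY register holds four 16-bit pkeys.  When
 * i = 7, pkeys[4..7] have been OR'd into reg at successive 16-bit
 * offsets (assuming the _B_SHIFT stride of 16), and the register is
 * written at RCV_PARTITION_KEY + (7 - 3) * 2 = RCV_PARTITION_KEY + 8,
 * the second register, after which reg is reset for the next group of
 * four.
 */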
12882
12883/*
12884 * These CSRs and memories are uninitialized on reset and must be
12885 * written before reading to set the ECC/parity bits.
12886 *
12887 * NOTE: All user context CSRs that are not mmaped write-only
12888 * (e.g. the TID flows) must be initialized even if the driver never
12889 * reads them.
12890 */
12891static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12892{
12893 int i, j;
12894
12895 /* CceIntMap */
12896 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12897 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12898
12899 /* SendCtxtCreditReturnAddr */
12900 for (i = 0; i < dd->chip_send_contexts; i++)
12901 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12902
12903 /* PIO Send buffers */
12904 /* SDMA Send buffers */
12905 /* These are not normally read, and (presently) have no method
12906 to be read, so are not pre-initialized */
12907
12908 /* RcvHdrAddr */
12909 /* RcvHdrTailAddr */
12910 /* RcvTidFlowTable */
12911 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12912 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12913 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12914 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12915 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12916 }
12917
12918 /* RcvArray */
12919 for (i = 0; i < dd->chip_rcv_array_count; i++)
12920 write_csr(dd, RCV_ARRAY + (8*i),
12921 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12922
12923 /* RcvQPMapTable */
12924 for (i = 0; i < 32; i++)
12925 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12926}
12927
12928/*
12929 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12930 */
12931static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12932 u64 ctrl_bits)
12933{
12934 unsigned long timeout;
12935 u64 reg;
12936
12937 /* is the condition present? */
12938 reg = read_csr(dd, CCE_STATUS);
12939 if ((reg & status_bits) == 0)
12940 return;
12941
12942 /* clear the condition */
12943 write_csr(dd, CCE_CTRL, ctrl_bits);
12944
12945 /* wait for the condition to clear */
12946 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12947 while (1) {
12948 reg = read_csr(dd, CCE_STATUS);
12949 if ((reg & status_bits) == 0)
12950 return;
12951 if (time_after(jiffies, timeout)) {
12952 dd_dev_err(dd,
12953 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12954 status_bits, reg & status_bits);
12955 return;
12956 }
12957 udelay(1);
12958 }
12959}
12960
12961/* set CCE CSRs to chip reset defaults */
12962static void reset_cce_csrs(struct hfi1_devdata *dd)
12963{
12964 int i;
12965
12966 /* CCE_REVISION read-only */
12967 /* CCE_REVISION2 read-only */
12968 /* CCE_CTRL - bits clear automatically */
12969 /* CCE_STATUS read-only, use CceCtrl to clear */
12970 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12971 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12972 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12973 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12974 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12975 /* CCE_ERR_STATUS read-only */
12976 write_csr(dd, CCE_ERR_MASK, 0);
12977 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12978 /* CCE_ERR_FORCE leave alone */
12979 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12980 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12981 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12982 /* CCE_PCIE_CTRL leave alone */
12983 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12984 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12985 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12986 CCE_MSIX_TABLE_UPPER_RESETCSR);
12987 }
12988 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12989 /* CCE_MSIX_PBA read-only */
12990 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12991 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12992 }
12993 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12994 write_csr(dd, CCE_INT_MAP, 0);
12995 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12996 /* CCE_INT_STATUS read-only */
12997 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12998 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12999 /* CCE_INT_FORCE leave alone */
13000 /* CCE_INT_BLOCKED read-only */
13001 }
13002 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13003 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13004}
13005
13006/* set ASIC CSRs to chip reset defaults */
13007static void reset_asic_csrs(struct hfi1_devdata *dd)
13008{
Mike Marciniszyn77241052015-07-30 15:17:43 -040013009 int i;
13010
13011 /*
13012 * If the HFIs are shared between separate nodes or VMs,
13013 * then more will need to be done here. One idea is a module
13014 * parameter that returns early, letting the first power-on or
13015 * a known first load do the reset and blocking all others.
13016 */
13017
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013018 if (!(dd->flags & HFI1_DO_INIT_ASIC))
13019 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013020
13021 if (dd->icode != ICODE_FPGA_EMULATION) {
13022 /* emulation does not have an SBus - leave these alone */
13023 /*
13024 * All writes to ASIC_CFG_SBUS_REQUEST do something.
13025 * Notes:
13026 * o The reset is not zero if aimed at the core. See the
13027 * SBus documentation for details.
13028 * o If the SBus firmware has been updated (e.g. by the BIOS),
13029 * will the reset revert that?
13030 */
13031 /* ASIC_CFG_SBUS_REQUEST leave alone */
13032 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
13033 }
13034 /* ASIC_SBUS_RESULT read-only */
13035 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
13036 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
13037 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
13038 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013039
13040 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013041 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013042
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050013043 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013044 /* ASIC_STS_THERM read-only */
13045 /* ASIC_CFG_RESET leave alone */
13046
13047 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
13048 /* ASIC_PCIE_SD_HOST_STATUS read-only */
13049 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
13050 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
13051 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
13052 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
13053 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
13054 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
13055 for (i = 0; i < 16; i++)
13056 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
13057
13058 /* ASIC_GPIO_IN read-only */
13059 write_csr(dd, ASIC_GPIO_OE, 0);
13060 write_csr(dd, ASIC_GPIO_INVERT, 0);
13061 write_csr(dd, ASIC_GPIO_OUT, 0);
13062 write_csr(dd, ASIC_GPIO_MASK, 0);
13063 /* ASIC_GPIO_STATUS read-only */
13064 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
13065 /* ASIC_GPIO_FORCE leave alone */
13066
13067 /* ASIC_QSFP1_IN read-only */
13068 write_csr(dd, ASIC_QSFP1_OE, 0);
13069 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13070 write_csr(dd, ASIC_QSFP1_OUT, 0);
13071 write_csr(dd, ASIC_QSFP1_MASK, 0);
13072 /* ASIC_QSFP1_STATUS read-only */
13073 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13074 /* ASIC_QSFP1_FORCE leave alone */
13075
13076 /* ASIC_QSFP2_IN read-only */
13077 write_csr(dd, ASIC_QSFP2_OE, 0);
13078 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13079 write_csr(dd, ASIC_QSFP2_OUT, 0);
13080 write_csr(dd, ASIC_QSFP2_MASK, 0);
13081 /* ASIC_QSFP2_STATUS read-only */
13082 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13083 /* ASIC_QSFP2_FORCE leave alone */
13084
13085 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13086 /* this also writes a NOP command, clearing paging mode */
13087 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13088 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013089}
13090
13091/* set MISC CSRs to chip reset defaults */
13092static void reset_misc_csrs(struct hfi1_devdata *dd)
13093{
13094 int i;
13095
13096 for (i = 0; i < 32; i++) {
13097 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13098 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13099 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13100 }
13101 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13102 only be written in 128-byte chunks */
13103 /* init RSA engine to clear lingering errors */
13104 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13105 write_csr(dd, MISC_CFG_RSA_MU, 0);
13106 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13107 /* MISC_STS_8051_DIGEST read-only */
13108 /* MISC_STS_SBM_DIGEST read-only */
13109 /* MISC_STS_PCIE_DIGEST read-only */
13110 /* MISC_STS_FAB_DIGEST read-only */
13111 /* MISC_ERR_STATUS read-only */
13112 write_csr(dd, MISC_ERR_MASK, 0);
13113 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13114 /* MISC_ERR_FORCE leave alone */
13115}
13116
13117/* set TXE CSRs to chip reset defaults */
13118static void reset_txe_csrs(struct hfi1_devdata *dd)
13119{
13120 int i;
13121
13122 /*
13123 * TXE Kernel CSRs
13124 */
13125 write_csr(dd, SEND_CTRL, 0);
13126 __cm_reset(dd, 0); /* reset CM internal state */
13127 /* SEND_CONTEXTS read-only */
13128 /* SEND_DMA_ENGINES read-only */
13129 /* SEND_PIO_MEM_SIZE read-only */
13130 /* SEND_DMA_MEM_SIZE read-only */
13131 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13132 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13133 /* SEND_PIO_ERR_STATUS read-only */
13134 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13135 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13136 /* SEND_PIO_ERR_FORCE leave alone */
13137 /* SEND_DMA_ERR_STATUS read-only */
13138 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13139 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13140 /* SEND_DMA_ERR_FORCE leave alone */
13141 /* SEND_EGRESS_ERR_STATUS read-only */
13142 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13143 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13144 /* SEND_EGRESS_ERR_FORCE leave alone */
13145 write_csr(dd, SEND_BTH_QP, 0);
13146 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13147 write_csr(dd, SEND_SC2VLT0, 0);
13148 write_csr(dd, SEND_SC2VLT1, 0);
13149 write_csr(dd, SEND_SC2VLT2, 0);
13150 write_csr(dd, SEND_SC2VLT3, 0);
13151 write_csr(dd, SEND_LEN_CHECK0, 0);
13152 write_csr(dd, SEND_LEN_CHECK1, 0);
13153 /* SEND_ERR_STATUS read-only */
13154 write_csr(dd, SEND_ERR_MASK, 0);
13155 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13156 /* SEND_ERR_FORCE read-only */
13157 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13158 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
13159 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13160 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
13161 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
13162 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
13163 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13164 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
13165 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13166 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
13167 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13168 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13169 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13170 /* SEND_CM_CREDIT_USED_STATUS read-only */
13171 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13172 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13173 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13174 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13175 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13176 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13177 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
13178 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13179 /* SEND_CM_CREDIT_USED_VL read-only */
13180 /* SEND_CM_CREDIT_USED_VL15 read-only */
13181 /* SEND_EGRESS_CTXT_STATUS read-only */
13182 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13183 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13184 /* SEND_EGRESS_ERR_INFO read-only */
13185 /* SEND_EGRESS_ERR_SOURCE read-only */
13186
13187 /*
13188 * TXE Per-Context CSRs
13189 */
13190 for (i = 0; i < dd->chip_send_contexts; i++) {
13191 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13192 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13193 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13194 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13195 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13196 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13197 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13198 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13199 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13200 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13201 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13202 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13203 }
13204
13205 /*
13206 * TXE Per-SDMA CSRs
13207 */
13208 for (i = 0; i < dd->chip_sdma_engines; i++) {
13209 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13210 /* SEND_DMA_STATUS read-only */
13211 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13212 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13213 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13214 /* SEND_DMA_HEAD read-only */
13215 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13216 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13217 /* SEND_DMA_IDLE_CNT read-only */
13218 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13219 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13220 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13221 /* SEND_DMA_ENG_ERR_STATUS read-only */
13222 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13223 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13224 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13225 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13226 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13227 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13228 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13229 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13230 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13231 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13232 }
13233}
13234
13235/*
13236 * Expect on entry:
13237 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13238 */
13239static void init_rbufs(struct hfi1_devdata *dd)
13240{
13241 u64 reg;
13242 int count;
13243
13244 /*
13245 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13246 * clear.
13247 */
13248 count = 0;
13249 while (1) {
13250 reg = read_csr(dd, RCV_STATUS);
13251 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13252 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13253 break;
13254 /*
13255 * Give up after 1ms - maximum wait time.
13256 *
13257 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13258		 * 250MB/s bandwidth. Derate to 66% for overhead to get:
13259		 * 148 KiB / (66% * 250MB/s) ~= 920us
13260 */
13261 if (count++ > 500) {
13262 dd_dev_err(dd,
13263 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13264 __func__, reg);
13265 break;
13266 }
13267 udelay(2); /* do not busy-wait the CSR */
13268 }
13269
13270 /* start the init - expect RcvCtrl to be 0 */
13271 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13272
13273 /*
13274	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13275 * period after the write before RcvStatus.RxRbufInitDone is valid.
13276 * The delay in the first run through the loop below is sufficient and
13277	 * required before the first read of RcvStatus.RxRbufInitDone.
13278 */
13279 read_csr(dd, RCV_CTRL);
13280
13281 /* wait for the init to finish */
13282 count = 0;
13283 while (1) {
13284 /* delay is required first time through - see above */
13285 udelay(2); /* do not busy-wait the CSR */
13286 reg = read_csr(dd, RCV_STATUS);
13287 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13288 break;
13289
13290 /* give up after 100us - slowest possible at 33MHz is 73us */
13291 if (count++ > 50) {
13292 dd_dev_err(dd,
13293				   "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13294 __func__);
13295 break;
13296 }
13297 }
13298}
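
/*
 * Illustrative sketch, not part of the driver: the bounded-wait pattern
 * used twice in init_rbufs() above.  A status CSR is polled with a short
 * delay between reads and the loop gives up after a fixed budget rather
 * than hanging - e.g. 500 iterations * 2us = 1ms for the DMA-drain wait,
 * sized from the 148 KiB RBuf draining at a derated PCIe Gen1 x1 rate.
 * get_status() stands in for read_csr(dd, RCV_STATUS).
 */
static inline int example_wait_for_bits_clear(u64 (*get_status)(void),
					      u64 mask, unsigned int max_iters,
					      unsigned int delay_us)
{
	unsigned int count;

	for (count = 0; count < max_iters; count++) {
		if ((get_status() & mask) == 0)
			return 0;	/* bits cleared, condition met */
		udelay(delay_us);	/* do not busy-wait the CSR */
	}
	return -ETIMEDOUT;		/* caller logs the CSR and continues */
}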
13299
13300/* set RXE CSRs to chip reset defaults */
13301static void reset_rxe_csrs(struct hfi1_devdata *dd)
13302{
13303 int i, j;
13304
13305 /*
13306 * RXE Kernel CSRs
13307 */
13308 write_csr(dd, RCV_CTRL, 0);
13309 init_rbufs(dd);
13310 /* RCV_STATUS read-only */
13311 /* RCV_CONTEXTS read-only */
13312 /* RCV_ARRAY_CNT read-only */
13313 /* RCV_BUF_SIZE read-only */
13314 write_csr(dd, RCV_BTH_QP, 0);
13315 write_csr(dd, RCV_MULTICAST, 0);
13316 write_csr(dd, RCV_BYPASS, 0);
13317 write_csr(dd, RCV_VL15, 0);
13318 /* this is a clear-down */
13319 write_csr(dd, RCV_ERR_INFO,
13320 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13321 /* RCV_ERR_STATUS read-only */
13322 write_csr(dd, RCV_ERR_MASK, 0);
13323 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13324 /* RCV_ERR_FORCE leave alone */
13325 for (i = 0; i < 32; i++)
13326 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13327 for (i = 0; i < 4; i++)
13328 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13329 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13330 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13331 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13332 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13333 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13334 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13335 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13336 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13337 }
13338 for (i = 0; i < 32; i++)
13339 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13340
13341 /*
13342 * RXE Kernel and User Per-Context CSRs
13343 */
13344 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13345 /* kernel */
13346 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13347 /* RCV_CTXT_STATUS read-only */
13348 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13349 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13350 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13351 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13352 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13353 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13354 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13355 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13356 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13357 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13358
13359 /* user */
13360 /* RCV_HDR_TAIL read-only */
13361 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13362 /* RCV_EGR_INDEX_TAIL read-only */
13363 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13364 /* RCV_EGR_OFFSET_TAIL read-only */
13365 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13366 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13367 0);
13368 }
13369 }
13370}
13371
13372/*
13373 * Set sc2vl tables.
13374 *
13375 * They power on to zeros, so to avoid send context errors
13376 * they need to be set:
13377 *
13378 * SC 0-7 -> VL 0-7 (respectively)
13379 * SC 15 -> VL 15
13380 * otherwise
13381 * -> VL 0
13382 */
13383static void init_sc2vl_tables(struct hfi1_devdata *dd)
13384{
13385 int i;
13386 /* init per architecture spec, constrained by hardware capability */
13387
13388 /* HFI maps sent packets */
13389 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13390 0,
13391 0, 0, 1, 1,
13392 2, 2, 3, 3,
13393 4, 4, 5, 5,
13394 6, 6, 7, 7));
13395 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13396 1,
13397 8, 0, 9, 0,
13398 10, 0, 11, 0,
13399 12, 0, 13, 0,
13400 14, 0, 15, 15));
13401 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13402 2,
13403 16, 0, 17, 0,
13404 18, 0, 19, 0,
13405 20, 0, 21, 0,
13406 22, 0, 23, 0));
13407 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13408 3,
13409 24, 0, 25, 0,
13410 26, 0, 27, 0,
13411 28, 0, 29, 0,
13412 30, 0, 31, 0));
13413
13414 /* DC maps received packets */
13415 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13416 15_0,
13417 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13418 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13419 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13420 31_16,
13421 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13422 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13423
13424 /* initialize the cached sc2vl values consistently with h/w */
13425 for (i = 0; i < 32; i++) {
13426 if (i < 8 || i == 15)
13427 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13428 else
13429 *((u8 *)(dd->sc2vl) + i) = 0;
13430 }
13431}
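
/*
 * Illustrative sketch, not part of the driver: the power-on SC-to-VL
 * mapping programmed above, written as a plain function.  It matches both
 * the SC2VL_VAL()/DC_SC_VL_VAL() register values and the cached dd->sc2vl
 * fill at the end of init_sc2vl_tables().
 */
static inline u8 example_default_sc2vl(unsigned int sc)
{
	if (sc < 8)
		return sc;	/* SC 0-7 -> VL 0-7, respectively */
	if (sc == 15)
		return 15;	/* SC 15 -> VL 15 */
	return 0;		/* all other SCs -> VL 0 */
}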
13432
13433/*
13434 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13435 * depend on the chip going through a power-on reset - a driver may be loaded
13436 * and unloaded many times.
13437 *
13438 * Do not write any CSR values to the chip in this routine - there may be
13439 * a reset following the (possible) FLR in this routine.
13440 *
13441 */
13442static void init_chip(struct hfi1_devdata *dd)
13443{
13444 int i;
13445
13446 /*
13447 * Put the HFI CSRs in a known state.
13448 * Combine this with a DC reset.
13449 *
13450 * Stop the device from doing anything while we do a
13451 * reset. We know there are no other active users of
13452	 * the device since we are now in charge. Turn off
13453	 * all outbound and inbound traffic and make sure
13454 * the device does not generate any interrupts.
13455 */
13456
13457 /* disable send contexts and SDMA engines */
13458 write_csr(dd, SEND_CTRL, 0);
13459 for (i = 0; i < dd->chip_send_contexts; i++)
13460 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13461 for (i = 0; i < dd->chip_sdma_engines; i++)
13462 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13463 /* disable port (turn off RXE inbound traffic) and contexts */
13464 write_csr(dd, RCV_CTRL, 0);
13465 for (i = 0; i < dd->chip_rcv_contexts; i++)
13466		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13467 /* mask all interrupt sources */
13468 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13469 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13470
13471 /*
13472 * DC Reset: do a full DC reset before the register clear.
13473 * A recommended length of time to hold is one CSR read,
13474 * so reread the CceDcCtrl. Then, hold the DC in reset
13475 * across the clear.
13476 */
13477 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13478 (void) read_csr(dd, CCE_DC_CTRL);
13479
13480 if (use_flr) {
13481 /*
13482 * A FLR will reset the SPC core and part of the PCIe.
13483 * The parts that need to be restored have already been
13484 * saved.
13485 */
13486 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13487
13488 /* do the FLR, the DC reset will remain */
13489 hfi1_pcie_flr(dd);
13490
13491 /* restore command and BARs */
13492 restore_pci_variables(dd);
13493
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013494 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013495 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13496 hfi1_pcie_flr(dd);
13497 restore_pci_variables(dd);
13498 }
13499
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013500 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013501 } else {
13502 dd_dev_info(dd, "Resetting CSRs with writes\n");
13503 reset_cce_csrs(dd);
13504 reset_txe_csrs(dd);
13505 reset_rxe_csrs(dd);
13506 reset_asic_csrs(dd);
13507 reset_misc_csrs(dd);
13508 }
13509 /* clear the DC reset */
13510 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013511
Mike Marciniszyn77241052015-07-30 15:17:43 -040013512 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013513 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013514 setextled(dd, 0);
13515 /*
13516 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013517 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013518 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013519 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013520 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013521 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013522 * I2CCLK and I2CDAT will change per direction, and INT_N and
13523 * MODPRS_N are input only and their value is ignored.
13524 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013525 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13526 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013527}
13528
13529static void init_early_variables(struct hfi1_devdata *dd)
13530{
13531 int i;
13532
13533 /* assign link credit variables */
13534 dd->vau = CM_VAU;
13535 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013536 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013537 dd->link_credits--;
13538 dd->vcu = cu_to_vcu(hfi1_cu);
13539 /* enough room for 8 MAD packets plus header - 17K */
13540 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13541 if (dd->vl15_init > dd->link_credits)
13542 dd->vl15_init = dd->link_credits;
13543
13544 write_uninitialized_csrs_and_memories(dd);
13545
13546 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13547 for (i = 0; i < dd->num_pports; i++) {
13548 struct hfi1_pportdata *ppd = &dd->pport[i];
13549
13550 set_partition_keys(ppd);
13551 }
13552 init_sc2vl_tables(dd);
13553}
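
/*
 * Worked example for the vl15_init calculation above; a sketch only,
 * assuming vau_to_au() returns the allocation unit size in bytes.  Eight
 * MAD packets plus headers is 8 * (2048 + 128) = 17408 bytes; with an AU
 * of, say, 32 bytes that yields 17408 / 32 = 544 credits, which is then
 * clamped to the chip's advertised link credits.
 */
static inline u32 example_vl15_credits(u32 au_bytes, u32 link_credits)
{
	u32 credits = (8 * (2048 + 128)) / au_bytes;

	return credits > link_credits ? link_credits : credits;
}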
13554
13555static void init_kdeth_qp(struct hfi1_devdata *dd)
13556{
13557 /* user changed the KDETH_QP */
13558 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13559 /* out of range or illegal value */
13560 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13561 kdeth_qp = 0;
13562 }
13563 if (kdeth_qp == 0) /* not set, or failed range check */
13564 kdeth_qp = DEFAULT_KDETH_QP;
13565
13566 write_csr(dd, SEND_BTH_QP,
13567 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13568 << SEND_BTH_QP_KDETH_QP_SHIFT);
13569
13570 write_csr(dd, RCV_BTH_QP,
13571 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13572 << RCV_BTH_QP_KDETH_QP_SHIFT);
13573}
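
/*
 * Illustrative sketch, not part of the driver, and an assumption about the
 * hardware compare: the KDETH prefix written to SEND_BTH_QP/RCV_BTH_QP
 * above appears to be matched against the top byte of the 24-bit BTH QPN,
 * so a QPN would be treated as KDETH traffic when its bits [23:16] equal
 * the configured prefix.
 */
static inline int example_qpn_matches_kdeth(u32 qpn, u32 prefix)
{
	return ((qpn >> 16) & 0xff) == (prefix & 0xff);
}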
13574
13575/**
13576 * init_qpmap_table
13577 * @dd - device data
13578 * @first_ctxt - first context
13579 * @last_ctxt - last context
13580 *
13581 * This routine sets the qpn mapping table that
13582 * is indexed by qpn[8:1].
13583 *
13584 * The routine will round robin the 256 settings
13585 * from first_ctxt to last_ctxt.
13586 *
13587 * The first/last looks ahead to having specialized
13588 * receive contexts for mgmt and bypass. Normal
13589 * verbs traffic is assumed to be on a range
13590 * of receive contexts.
13591 */
13592static void init_qpmap_table(struct hfi1_devdata *dd,
13593 u32 first_ctxt,
13594 u32 last_ctxt)
13595{
13596 u64 reg = 0;
13597 u64 regno = RCV_QP_MAP_TABLE;
13598 int i;
13599 u64 ctxt = first_ctxt;
13600
13601 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013602 reg |= ctxt << (8 * (i % 8));
13603 i++;
13604 ctxt++;
13605 if (ctxt > last_ctxt)
13606 ctxt = first_ctxt;
13607 if (i % 8 == 0) {
13608 write_csr(dd, regno, reg);
13609 reg = 0;
13610 regno += 8;
13611 }
13612 }
13613 if (i % 8)
13614 write_csr(dd, regno, reg);
13615
13616 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13617 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13618}
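
/*
 * Illustrative sketch, not part of the driver: how init_qpmap_table()
 * packs its 256 one-byte entries.  Entry i lands in 64-bit register i/8
 * at byte lane i%8, and the context number round-robins over
 * [first_ctxt, last_ctxt].  The zeroed map[] array stands in for the 32
 * RCV_QP_MAP_TABLE CSR writes.
 */
static inline void example_fill_qpmap(u64 map[32], u32 first_ctxt,
				      u32 last_ctxt)
{
	u32 i, ctxt = first_ctxt;

	for (i = 0; i < 256; i++) {
		map[i / 8] |= (u64)ctxt << (8 * (i % 8));
		if (++ctxt > last_ctxt)
			ctxt = first_ctxt;
	}
}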
13619
13620/**
13621 * init_qos - init RX qos
13622 * @dd - device data
13623 * @first_ctxt - first context to use
13624 *
13625 * This routine initializes Rule 0 and the
13626 * RSM map table to implement qos.
13627 *
13628 * If all of the limit tests succeed,
13629 * qos is applied based on the array
13630 * interpretation of krcvqs where
13631 * entry 0 is VL0.
13632 *
13633 * The number of vl bits (n) and the number of qpn
13634 * bits (m) are computed to feed both the RSM map table
13635 * and the single rule.
13636 *
13637 */
13638static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13639{
13640 u8 max_by_vl = 0;
13641 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13642 u64 *rsmmap;
13643 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013644 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013645
13646 /* validate */
13647 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13648 num_vls == 1 ||
13649 krcvqsset <= 1)
13650 goto bail;
13651 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13652 if (krcvqs[i] > max_by_vl)
13653 max_by_vl = krcvqs[i];
13654 if (max_by_vl > 32)
13655 goto bail;
13656 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13657 /* determine bits vl */
13658 n = ilog2(num_vls);
13659 /* determine bits for qpn */
13660 m = ilog2(qpns_per_vl);
13661 if ((m + n) > 7)
13662 goto bail;
13663 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13664 goto bail;
13665 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013666 if (!rsmmap)
13667 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013668 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13669 /* init the local copy of the table */
13670 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13671 unsigned tctxt;
13672
13673 for (qpn = 0, tctxt = ctxt;
13674 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13675 unsigned idx, regoff, regidx;
13676
13677 /* generate index <= 128 */
13678 idx = (qpn << n) ^ i;
13679 regoff = (idx % 8) * 8;
13680 regidx = idx / 8;
13681 reg = rsmmap[regidx];
13682 /* replace 0xff with context number */
13683 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13684 << regoff);
13685 reg |= (u64)(tctxt++) << regoff;
13686 rsmmap[regidx] = reg;
13687 if (tctxt == ctxt + krcvqs[i])
13688 tctxt = ctxt;
13689 }
13690 ctxt += krcvqs[i];
13691 }
13692 /* flush cached copies to chip */
13693 for (i = 0; i < NUM_MAP_REGS; i++)
13694 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13695 /* add rule0 */
13696 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13697 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13698 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13699 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13700 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13701 LRH_BTH_MATCH_OFFSET
13702 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13703 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13704 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13705 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13706 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13707 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13708 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13709 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13710 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13711 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13712 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13713 /* Enable RSM */
13714 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13715 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013716 /* map everything else to first context */
13717 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013718 dd->qos_shift = n + 1;
13719 return;
13720bail:
13721 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013722 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013723}
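
/*
 * Illustrative sketch, not part of the driver: the RSM index arithmetic
 * used by init_qos() above.  With n = ilog2(num_vls) VL-select bits and
 * m = ilog2(qpns_per_vl) QPN bits, constrained so that m + n <= 7, the
 * map-table entry chosen for a given (qpn, vl) pair is (qpn << n) ^ vl,
 * which is then split into a register index and byte lane exactly as in
 * the loop above.
 */
static inline unsigned int example_rsm_map_index(unsigned int qpn,
						 unsigned int vl,
						 unsigned int n)
{
	return (qpn << n) ^ vl;	/* stays below 128 when m + n <= 7 */
}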
13724
13725static void init_rxe(struct hfi1_devdata *dd)
13726{
13727 /* enable all receive errors */
13728 write_csr(dd, RCV_ERR_MASK, ~0ull);
13729 /* setup QPN map table - start where VL15 context leaves off */
13730 init_qos(
13731 dd,
13732 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13733 /*
13734 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13735 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13736 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13737 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13738	 * Max_Payload_Size set to its minimum of 128.
13739 *
13740 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13741 * (64 bytes). Max_Payload_Size is possibly modified upward in
13742 * tune_pcie_caps() which is called after this routine.
13743 */
13744}
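
/*
 * Illustrative sketch, not part of the driver: the only invalid
 * combination described above is a RcvCtrl.RcvWcb value larger than the
 * PCIe Max_Payload_Size, so a configuration check reduces to the
 * comparison below (both values in bytes).
 */
static inline int example_rcv_wcb_config_ok(u32 rcv_wcb_bytes, u32 mps_bytes)
{
	return rcv_wcb_bytes <= mps_bytes;
}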
13745
13746static void init_other(struct hfi1_devdata *dd)
13747{
13748 /* enable all CCE errors */
13749 write_csr(dd, CCE_ERR_MASK, ~0ull);
13750 /* enable *some* Misc errors */
13751 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13752 /* enable all DC errors, except LCB */
13753 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13754 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13755}
13756
13757/*
13758 * Fill out the given AU table using the given CU. A CU is defined in terms
13759 * of AUs. The table is an encoding: given the index, how many AUs does that
13760 * represent?
13761 *
13762 * NOTE: Assumes that the register layout is the same for the
13763 * local and remote tables.
13764 */
13765static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13766 u32 csr0to3, u32 csr4to7)
13767{
13768 write_csr(dd, csr0to3,
13769 0ull <<
13770 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13771 | 1ull <<
13772 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13773 | 2ull * cu <<
13774 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13775 | 4ull * cu <<
13776 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13777 write_csr(dd, csr4to7,
13778 8ull * cu <<
13779 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13780 | 16ull * cu <<
13781 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13782 | 32ull * cu <<
13783 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13784 | 64ull * cu <<
13785 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13786
13787}
13788
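/*
 * Illustrative sketch, not part of the driver: the encoding programmed by
 * assign_cm_au_table() above.  Entry 0 represents 0 AUs, entry 1 one AU,
 * and entries 2..7 represent 2*cu, 4*cu, ..., 64*cu AUs, doubling at each
 * step.
 */
static inline u32 example_au_table_entry(unsigned int index, u32 cu)
{
	static const u32 mult[8] = { 0, 1, 2, 4, 8, 16, 32, 64 };

	if (index >= 8)
		return 0;	/* the table has 8 entries */
	return index < 2 ? mult[index] : mult[index] * cu;
}
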
13789static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13790{
13791 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13792 SEND_CM_LOCAL_AU_TABLE4_TO7);
13793}
13794
13795void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13796{
13797 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13798 SEND_CM_REMOTE_AU_TABLE4_TO7);
13799}
13800
13801static void init_txe(struct hfi1_devdata *dd)
13802{
13803 int i;
13804
13805 /* enable all PIO, SDMA, general, and Egress errors */
13806 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13807 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13808 write_csr(dd, SEND_ERR_MASK, ~0ull);
13809 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13810
13811 /* enable all per-context and per-SDMA engine errors */
13812 for (i = 0; i < dd->chip_send_contexts; i++)
13813 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13814 for (i = 0; i < dd->chip_sdma_engines; i++)
13815 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13816
13817 /* set the local CU to AU mapping */
13818 assign_local_cm_au_table(dd, dd->vcu);
13819
13820 /*
13821 * Set reasonable default for Credit Return Timer
13822 * Don't set on Simulator - causes it to choke.
13823 */
13824 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13825 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13826}
13827
13828int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13829{
13830 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13831 unsigned sctxt;
13832 int ret = 0;
13833 u64 reg;
13834
13835 if (!rcd || !rcd->sc) {
13836 ret = -EINVAL;
13837 goto done;
13838 }
13839 sctxt = rcd->sc->hw_context;
13840 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13841 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13842 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13843 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13844 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13845 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13846 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13847 /*
13848 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013849 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013850 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013851 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13852 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13853 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13854 }
13855
13856 /* Enable J_KEY check on receive context. */
13857 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13858 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13859 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13860 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13861done:
13862 return ret;
13863}
13864
13865int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13866{
13867 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13868 unsigned sctxt;
13869 int ret = 0;
13870 u64 reg;
13871
13872 if (!rcd || !rcd->sc) {
13873 ret = -EINVAL;
13874 goto done;
13875 }
13876 sctxt = rcd->sc->hw_context;
13877 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13878 /*
13879 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13880 * This check would not have been enabled for A0 h/w, see
13881 * hfi1_set_ctxt_jkey().
13882 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013883 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013884 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13885 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13886 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13887 }
13888 /* Turn off the J_KEY on the receive side */
13889 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13890done:
13891 return ret;
13892}
13893
13894int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13895{
13896 struct hfi1_ctxtdata *rcd;
13897 unsigned sctxt;
13898 int ret = 0;
13899 u64 reg;
13900
13901 if (ctxt < dd->num_rcv_contexts)
13902 rcd = dd->rcd[ctxt];
13903 else {
13904 ret = -EINVAL;
13905 goto done;
13906 }
13907 if (!rcd || !rcd->sc) {
13908 ret = -EINVAL;
13909 goto done;
13910 }
13911 sctxt = rcd->sc->hw_context;
13912 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13913 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13914 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13915 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13916 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13917 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13918done:
13919 return ret;
13920}
13921
13922int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13923{
13924 struct hfi1_ctxtdata *rcd;
13925 unsigned sctxt;
13926 int ret = 0;
13927 u64 reg;
13928
13929 if (ctxt < dd->num_rcv_contexts)
13930 rcd = dd->rcd[ctxt];
13931 else {
13932 ret = -EINVAL;
13933 goto done;
13934 }
13935 if (!rcd || !rcd->sc) {
13936 ret = -EINVAL;
13937 goto done;
13938 }
13939 sctxt = rcd->sc->hw_context;
13940 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13941 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13942 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13943 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13944done:
13945 return ret;
13946}
13947
13948/*
13949 * Start doing the clean up of the chip. Our clean up happens in multiple
13950 * stages and this is just the first.
13951 */
13952void hfi1_start_cleanup(struct hfi1_devdata *dd)
13953{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013954 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013955 free_cntrs(dd);
13956 free_rcverr(dd);
13957 clean_up_interrupts(dd);
13958}
13959
13960#define HFI_BASE_GUID(dev) \
13961 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13962
13963/*
13964 * Certain chip functions need to be initialized only once per asic
13965 * instead of per-device. This function finds the peer device and
13966 * checks whether that chip initialization needs to be done by this
13967 * device.
13968 */
13969static void asic_should_init(struct hfi1_devdata *dd)
13970{
13971 unsigned long flags;
13972 struct hfi1_devdata *tmp, *peer = NULL;
13973
13974 spin_lock_irqsave(&hfi1_devs_lock, flags);
13975 /* Find our peer device */
13976 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13977 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13978 dd->unit != tmp->unit) {
13979 peer = tmp;
13980 break;
13981 }
13982 }
13983
13984 /*
13985 * "Claim" the ASIC for initialization if it hasn't been
13986	 * "claimed" yet.
13987 */
13988 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13989 dd->flags |= HFI1_DO_INIT_ASIC;
13990 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13991}
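
/*
 * Illustrative sketch, not part of the driver: two HFIs that share an ASIC
 * have GUIDs that differ only in the bit stripped by HFI_BASE_GUID(), so
 * the peer lookup in asic_should_init() amounts to the comparison below.
 */
static inline int example_same_asic(u64 guid_a, u64 guid_b)
{
	u64 mask = ~(1ULL << GUID_HFI_INDEX_SHIFT);

	return (guid_a & mask) == (guid_b & mask);
}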
13992
Dean Luick5d9157a2015-11-16 21:59:34 -050013993/*
13994 * Set dd->boardname. Use a generic name if a name is not returned from
13995 * EFI variable space.
13996 *
13997 * Return 0 on success, -ENOMEM if space could not be allocated.
13998 */
13999static int obtain_boardname(struct hfi1_devdata *dd)
14000{
14001 /* generic board description */
14002 const char generic[] =
14003 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14004 unsigned long size;
14005 int ret;
14006
14007 ret = read_hfi1_efi_var(dd, "description", &size,
14008 (void **)&dd->boardname);
14009 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014010 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014011 /* use generic description */
14012 dd->boardname = kstrdup(generic, GFP_KERNEL);
14013 if (!dd->boardname)
14014 return -ENOMEM;
14015 }
14016 return 0;
14017}
14018
Mike Marciniszyn77241052015-07-30 15:17:43 -040014019/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014020 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014021 * @dev: the pci_dev for hfi1_ib device
14022 * @ent: pci_device_id struct for this dev
14023 *
14024 * Also allocates, initializes, and returns the devdata struct for this
14025 * device instance
14026 *
14027 * This is global, and is called directly at init to set up the
14028 * chip-specific function pointers for later use.
14029 */
14030struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14031 const struct pci_device_id *ent)
14032{
14033 struct hfi1_devdata *dd;
14034 struct hfi1_pportdata *ppd;
14035 u64 reg;
14036 int i, ret;
14037 static const char * const inames[] = { /* implementation names */
14038 "RTL silicon",
14039 "RTL VCS simulation",
14040 "RTL FPGA emulation",
14041 "Functional simulator"
14042 };
14043
14044 dd = hfi1_alloc_devdata(pdev,
14045 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
14046 if (IS_ERR(dd))
14047 goto bail;
14048 ppd = dd->pport;
14049 for (i = 0; i < dd->num_pports; i++, ppd++) {
14050 int vl;
14051 /* init common fields */
14052 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14053 /* DC supports 4 link widths */
14054 ppd->link_width_supported =
14055 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14056 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14057 ppd->link_width_downgrade_supported =
14058 ppd->link_width_supported;
14059 /* start out enabling only 4X */
14060 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14061 ppd->link_width_downgrade_enabled =
14062 ppd->link_width_downgrade_supported;
14063 /* link width active is 0 when link is down */
14064 /* link width downgrade active is 0 when link is down */
14065
14066 if (num_vls < HFI1_MIN_VLS_SUPPORTED
14067 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
14068 hfi1_early_err(&pdev->dev,
14069 "Invalid num_vls %u, using %u VLs\n",
14070 num_vls, HFI1_MAX_VLS_SUPPORTED);
14071 num_vls = HFI1_MAX_VLS_SUPPORTED;
14072 }
14073 ppd->vls_supported = num_vls;
14074 ppd->vls_operational = ppd->vls_supported;
14075 /* Set the default MTU. */
14076 for (vl = 0; vl < num_vls; vl++)
14077 dd->vld[vl].mtu = hfi1_max_mtu;
14078 dd->vld[15].mtu = MAX_MAD_PACKET;
14079 /*
14080		 * Set the initial values to reasonable defaults; they will be set
14081		 * for real when the link is up.
14082 */
14083 ppd->lstate = IB_PORT_DOWN;
14084 ppd->overrun_threshold = 0x4;
14085 ppd->phy_error_threshold = 0xf;
14086 ppd->port_crc_mode_enabled = link_crc_mask;
14087 /* initialize supported LTP CRC mode */
14088 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14089 /* initialize enabled LTP CRC mode */
14090 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14091 /* start in offline */
14092 ppd->host_link_state = HLS_DN_OFFLINE;
14093 init_vl_arb_caches(ppd);
14094 }
14095
14096 dd->link_default = HLS_DN_POLL;
14097
14098 /*
14099 * Do remaining PCIe setup and save PCIe values in dd.
14100 * Any error printing is already done by the init code.
14101 * On return, we have the chip mapped.
14102 */
14103 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14104 if (ret < 0)
14105 goto bail_free;
14106
14107 /* verify that reads actually work, save revision for reset check */
14108 dd->revision = read_csr(dd, CCE_REVISION);
14109 if (dd->revision == ~(u64)0) {
14110 dd_dev_err(dd, "cannot read chip CSRs\n");
14111 ret = -EINVAL;
14112 goto bail_cleanup;
14113 }
14114 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14115 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14116 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14117 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14118
14119 /* obtain the hardware ID - NOT related to unit, which is a
14120 software enumeration */
14121 reg = read_csr(dd, CCE_REVISION2);
14122 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14123 & CCE_REVISION2_HFI_ID_MASK;
14124 /* the variable size will remove unwanted bits */
14125 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14126 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14127 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14128 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
14129 (int)dd->irev);
14130
14131 /* speeds the hardware can support */
14132 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14133 /* speeds allowed to run at */
14134 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14135 /* give a reasonable active value, will be set on link up */
14136 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14137
14138 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14139 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14140 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14141 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14142 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14143 /* fix up link widths for emulation _p */
14144 ppd = dd->pport;
14145 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14146 ppd->link_width_supported =
14147 ppd->link_width_enabled =
14148 ppd->link_width_downgrade_supported =
14149 ppd->link_width_downgrade_enabled =
14150 OPA_LINK_WIDTH_1X;
14151 }
14152	/* ensure num_vls isn't larger than the number of sdma engines */
14153 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14154 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014155 num_vls, dd->chip_sdma_engines);
14156 num_vls = dd->chip_sdma_engines;
14157 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014158 }
14159
14160 /*
14161 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14162 * Limit the max if larger than the field holds. If timeout is
14163 * non-zero, then the calculated field will be at least 1.
14164 *
14165 * Must be after icode is set up - the cclock rate depends
14166 * on knowing the hardware being used.
14167 */
14168 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14169 if (dd->rcv_intr_timeout_csr >
14170 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14171 dd->rcv_intr_timeout_csr =
14172 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14173 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14174 dd->rcv_intr_timeout_csr = 1;
14175
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014176 /* needs to be done before we look for the peer device */
14177 read_guid(dd);
14178
14179 /* should this device init the ASIC block? */
14180 asic_should_init(dd);
14181
Mike Marciniszyn77241052015-07-30 15:17:43 -040014182 /* obtain chip sizes, reset chip CSRs */
14183 init_chip(dd);
14184
14185 /* read in the PCIe link speed information */
14186 ret = pcie_speeds(dd);
14187 if (ret)
14188 goto bail_cleanup;
14189
Mike Marciniszyn77241052015-07-30 15:17:43 -040014190 /* read in firmware */
14191 ret = hfi1_firmware_init(dd);
14192 if (ret)
14193 goto bail_cleanup;
14194
14195 /*
14196 * In general, the PCIe Gen3 transition must occur after the
14197 * chip has been idled (so it won't initiate any PCIe transactions
14198 * e.g. an interrupt) and before the driver changes any registers
14199 * (the transition will reset the registers).
14200 *
14201 * In particular, place this call after:
14202 * - init_chip() - the chip will not initiate any PCIe transactions
14203 * - pcie_speeds() - reads the current link speed
14204 * - hfi1_firmware_init() - the needed firmware is ready to be
14205 * downloaded
14206 */
14207 ret = do_pcie_gen3_transition(dd);
14208 if (ret)
14209 goto bail_cleanup;
14210
14211 /* start setting dd values and adjusting CSRs */
14212 init_early_variables(dd);
14213
14214 parse_platform_config(dd);
14215
Dean Luick5d9157a2015-11-16 21:59:34 -050014216 ret = obtain_boardname(dd);
14217 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014218 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014219
14220 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014221 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014222 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014223 (u32)dd->majrev,
14224 (u32)dd->minrev,
14225 (dd->revision >> CCE_REVISION_SW_SHIFT)
14226 & CCE_REVISION_SW_MASK);
14227
14228 ret = set_up_context_variables(dd);
14229 if (ret)
14230 goto bail_cleanup;
14231
14232 /* set initial RXE CSRs */
14233 init_rxe(dd);
14234 /* set initial TXE CSRs */
14235 init_txe(dd);
14236 /* set initial non-RXE, non-TXE CSRs */
14237 init_other(dd);
14238 /* set up KDETH QP prefix in both RX and TX CSRs */
14239 init_kdeth_qp(dd);
14240
14241 /* send contexts must be set up before receive contexts */
14242 ret = init_send_contexts(dd);
14243 if (ret)
14244 goto bail_cleanup;
14245
14246 ret = hfi1_create_ctxts(dd);
14247 if (ret)
14248 goto bail_cleanup;
14249
14250 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14251 /*
14252 * rcd[0] is guaranteed to be valid by this point. Also, all
14253	 * contexts are using the same value, as per the module parameter.
14254 */
14255 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14256
14257 ret = init_pervl_scs(dd);
14258 if (ret)
14259 goto bail_cleanup;
14260
14261 /* sdma init */
14262 for (i = 0; i < dd->num_pports; ++i) {
14263 ret = sdma_init(dd, i);
14264 if (ret)
14265 goto bail_cleanup;
14266 }
14267
14268 /* use contexts created by hfi1_create_ctxts */
14269 ret = set_up_interrupts(dd);
14270 if (ret)
14271 goto bail_cleanup;
14272
14273 /* set up LCB access - must be after set_up_interrupts() */
14274 init_lcb_access(dd);
14275
14276 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14277 dd->base_guid & 0xFFFFFF);
14278
14279 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14280 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14281 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14282
14283 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14284 if (ret)
14285 goto bail_clear_intr;
14286 check_fabric_firmware_versions(dd);
14287
14288 thermal_init(dd);
14289
14290 ret = init_cntrs(dd);
14291 if (ret)
14292 goto bail_clear_intr;
14293
14294 ret = init_rcverr(dd);
14295 if (ret)
14296 goto bail_free_cntrs;
14297
14298 ret = eprom_init(dd);
14299 if (ret)
14300 goto bail_free_rcverr;
14301
14302 goto bail;
14303
14304bail_free_rcverr:
14305 free_rcverr(dd);
14306bail_free_cntrs:
14307 free_cntrs(dd);
14308bail_clear_intr:
14309 clean_up_interrupts(dd);
14310bail_cleanup:
14311 hfi1_pcie_ddcleanup(dd);
14312bail_free:
14313 hfi1_free_devdata(dd);
14314 dd = ERR_PTR(ret);
14315bail:
14316 return dd;
14317}
14318
14319static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14320 u32 dw_len)
14321{
14322 u32 delta_cycles;
14323 u32 current_egress_rate = ppd->current_egress_rate;
14324 /* rates here are in units of 10^6 bits/sec */
14325
14326 if (desired_egress_rate == -1)
14327 return 0; /* shouldn't happen */
14328
14329 if (desired_egress_rate >= current_egress_rate)
14330 return 0; /* we can't help go faster, only slower */
14331
14332 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14333 egress_cycles(dw_len * 4, current_egress_rate);
14334
14335 return (u16)delta_cycles;
14336}
14337
14338
14339/**
14340 * create_pbc - build a pbc for transmission
14341 * @flags: special case flags or-ed in built pbc
14342 * @srate_mbs: static rate in Mbit/s
14343 * @vl: vl
14344 * @dw_len: dword length (header words + data words + pbc words)
14345 *
14346 * Create a PBC with the given flags, rate, VL, and length.
14347 *
14348 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14349 * for verbs, which does not use this PSM feature. The lone other caller
14350 * is for the diagnostic interface which calls this if the user does not
14351 * supply their own PBC.
14352 */
14353u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14354 u32 dw_len)
14355{
14356 u64 pbc, delay = 0;
14357
14358 if (unlikely(srate_mbs))
14359 delay = delay_cycles(ppd, srate_mbs, dw_len);
14360
14361 pbc = flags
14362 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14363 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14364 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14365 | (dw_len & PBC_LENGTH_DWS_MASK)
14366 << PBC_LENGTH_DWS_SHIFT;
14367
14368 return pbc;
14369}
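
/*
 * Usage sketch, illustrative only: building a PBC for a packet of dw_len
 * dwords on VL 0 with no special flags and no static rate throttling.
 * The VL, length, HCRC disposition, and optional delay fields are packed
 * exactly as in create_pbc() above.
 */
static inline u64 example_build_simple_pbc(struct hfi1_pportdata *ppd,
					   u32 dw_len)
{
	return create_pbc(ppd, 0 /* flags */, 0 /* srate_mbs */,
			  0 /* VL */, dw_len);
}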
14370
14371#define SBUS_THERMAL 0x4f
14372#define SBUS_THERM_MONITOR_MODE 0x1
14373
14374#define THERM_FAILURE(dev, ret, reason) \
14375 dd_dev_err((dd), \
14376 "Thermal sensor initialization failed: %s (%d)\n", \
14377 (reason), (ret))
14378
14379/*
14380 * Initialize the Avago Thermal sensor.
14381 *
14382 * After initialization, enable polling of thermal sensor through
14383 * SBus interface. In order for this to work, the SBus Master
14384 * firmware has to be loaded due to the fact that the HW polling
14385 * logic uses SBus interrupts, which are not supported with
14386 * default firmware. Otherwise, no data will be returned through
14387 * the ASIC_STS_THERM CSR.
14388 */
14389static int thermal_init(struct hfi1_devdata *dd)
14390{
14391 int ret = 0;
14392
14393 if (dd->icode != ICODE_RTL_SILICON ||
14394 !(dd->flags & HFI1_DO_INIT_ASIC))
14395 return ret;
14396
14397 acquire_hw_mutex(dd);
14398 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014399 /* Disable polling of thermal readings */
14400 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14401 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014402 /* Thermal Sensor Initialization */
14403 /* Step 1: Reset the Thermal SBus Receiver */
14404 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14405 RESET_SBUS_RECEIVER, 0);
14406 if (ret) {
14407 THERM_FAILURE(dd, ret, "Bus Reset");
14408 goto done;
14409 }
14410 /* Step 2: Set Reset bit in Thermal block */
14411 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14412 WRITE_SBUS_RECEIVER, 0x1);
14413 if (ret) {
14414 THERM_FAILURE(dd, ret, "Therm Block Reset");
14415 goto done;
14416 }
14417 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14418 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14419 WRITE_SBUS_RECEIVER, 0x32);
14420 if (ret) {
14421 THERM_FAILURE(dd, ret, "Write Clock Div");
14422 goto done;
14423 }
14424 /* Step 4: Select temperature mode */
14425 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14426 WRITE_SBUS_RECEIVER,
14427 SBUS_THERM_MONITOR_MODE);
14428 if (ret) {
14429 THERM_FAILURE(dd, ret, "Write Mode Sel");
14430 goto done;
14431 }
14432 /* Step 5: De-assert block reset and start conversion */
14433 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14434 WRITE_SBUS_RECEIVER, 0x2);
14435 if (ret) {
14436 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14437 goto done;
14438 }
14439 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14440 msleep(22);
14441
14442 /* Enable polling of thermal readings */
14443 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14444done:
14445 release_hw_mutex(dd);
14446 return ret;
14447}
14448
14449static void handle_temp_err(struct hfi1_devdata *dd)
14450{
14451 struct hfi1_pportdata *ppd = &dd->pport[0];
14452 /*
14453 * Thermal Critical Interrupt
14454 * Put the device into forced freeze mode, take link down to
14455 * offline, and put DC into reset.
14456 */
14457 dd_dev_emerg(dd,
14458 "Critical temperature reached! Forcing device into freeze mode!\n");
14459 dd->flags |= HFI1_FORCED_FREEZE;
14460 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14461 /*
14462 * Shut DC down as much and as quickly as possible.
14463 *
14464 * Step 1: Take the link down to OFFLINE. This will cause the
14465 * 8051 to put the Serdes in reset. However, we don't want to
14466 * go through the entire link state machine since we want to
14467 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14468 * but rather an attempt to save the chip.
14469 * Code below is almost the same as quiet_serdes() but avoids
14470 * all the extra work and the sleeps.
14471 */
14472 ppd->driver_link_ready = 0;
14473 ppd->link_enabled = 0;
14474 set_physical_link_state(dd, PLS_OFFLINE |
14475 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14476 /*
14477 * Step 2: Shutdown LCB and 8051
14478 * After shutdown, do not restore DC_CFG_RESET value.
14479 */
14480 dc_shutdown(dd);
14481}