1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
66#include "efivar.h"
67#include "platform.h"
68#include "aspm.h"
69
70#define NUM_IB_PORTS 1
71
72uint kdeth_qp;
73module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
74MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
75
76uint num_vls = HFI1_MAX_VLS_SUPPORTED;
77module_param(num_vls, uint, S_IRUGO);
78MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
79
80/*
81 * Default time to aggregate two 10K packets from the idle state
82 * (timer not running). The timer starts at the end of the first packet,
83 * so only the time for one 10K packet and header plus a bit extra is needed.
84 * 10 * 1024 + 64 header bytes = 10304 bytes
85 * 10304 bytes / 12.5 GB/s = 824.32 ns
86 */
87uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
88module_param(rcv_intr_timeout, uint, S_IRUGO);
89MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
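/*
 * Worked example of the default above (illustrative only; the numbers are
 * taken directly from the comment preceding rcv_intr_timeout):
 *
 *   payload + header = 10 * 1024 + 64          = 10304 bytes
 *   packet time      = 10304 bytes / 12.5 GB/s = 824.32 ns
 *   default timeout  = 824 ns + 16 ns coalescing margin = 840 ns
 */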
90
91uint rcv_intr_count = 16; /* same as qib */
92module_param(rcv_intr_count, uint, S_IRUGO);
93MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
94
95ushort link_crc_mask = SUPPORTED_CRCS;
96module_param(link_crc_mask, ushort, S_IRUGO);
97MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
98
99uint loopback;
100module_param_named(loopback, loopback, uint, S_IRUGO);
101MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
102
103/* Other driver tunables */
104uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
105static ushort crc_14b_sideband = 1;
106static uint use_flr = 1;
107uint quick_linkup; /* skip LNI */
108
109struct flag_table {
110 u64 flag; /* the flag */
111 char *str; /* description string */
112 u16 extra; /* extra information */
113 u16 unused0;
114 u32 unused1;
115};
116
117/* str must be a string constant */
118#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
119#define FLAG_ENTRY0(str, flag) {flag, str, 0}
120
121/* Send Error Consequences */
122#define SEC_WRITE_DROPPED 0x1
123#define SEC_PACKET_DROPPED 0x2
124#define SEC_SC_HALTED 0x4 /* per-context only */
125#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
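/*
 * Illustrative sketch (compiled out, not part of the driver): how a
 * flag_table such as those defined below could be scanned to turn an error
 * status value into a readable list while OR-ing together the SEC_*
 * consequences.  The helper name and buffer handling here are hypothetical;
 * the driver's real decode and handling routines appear later in this file.
 */
#if 0
static u16 example_decode_flags(char *buf, size_t len, u64 status,
				struct flag_table *table, size_t nentries)
{
	u16 consequences = 0;
	size_t i;
	int used = 0;

	for (i = 0; i < nentries; i++) {
		if (!(status & table[i].flag))
			continue;
		used += scnprintf(buf + used, len - used, "%s%s",
				  used ? "," : "", table[i].str);
		consequences |= table[i].extra;	/* e.g. SEC_SPC_FREEZE */
	}
	return consequences;
}
#endif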
126
127#define MIN_KERNEL_KCTXTS 2
128#define FIRST_KERNEL_KCTXT 1
129#define NUM_MAP_REGS 32
130
131/* Bit offset into the GUID which carries HFI id information */
132#define GUID_HFI_INDEX_SHIFT 39
133
134/* extract the emulation revision */
135#define emulator_rev(dd) ((dd)->irev >> 8)
136/* parallel and serial emulation versions are 3 and 4 respectively */
137#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
138#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
139
140/* RSM fields */
141
142/* packet type */
143#define IB_PACKET_TYPE 2ull
144#define QW_SHIFT 6ull
145/* QPN[7..1] */
146#define QPN_WIDTH 7ull
147
148/* LRH.BTH: QW 0, OFFSET 48 - for match */
149#define LRH_BTH_QW 0ull
150#define LRH_BTH_BIT_OFFSET 48ull
151#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
152#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
153#define LRH_BTH_SELECT
154#define LRH_BTH_MASK 3ull
155#define LRH_BTH_VALUE 2ull
156
157/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
158#define LRH_SC_QW 0ull
159#define LRH_SC_BIT_OFFSET 56ull
160#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
161#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
162#define LRH_SC_MASK 128ull
163#define LRH_SC_VALUE 0ull
164
165/* SC[n..0] QW 0, OFFSET 60 - for select */
166#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
167
168/* QPN[m+n:1] QW 1, OFFSET 1 */
169#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
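/*
 * Worked values for the match/select offsets above (illustrative only):
 *
 *   LRH_BTH_MATCH_OFFSET  = (0 << 6) | 48 = 48
 *   LRH_SC_MATCH_OFFSET   = (0 << 6) | 56 = 56
 *   LRH_SC_SELECT_OFFSET  = (0 << 6) | 60 = 60
 *   QPN_SELECT_OFFSET     = (1 << 6) |  1 = 65
 *
 * i.e. the bits above QW_SHIFT select which quad word of the header is
 * examined and the low bits give the bit offset within that quad word.
 */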
170
171/* defines to build power on SC2VL table */
172#define SC2VL_VAL( \
173 num, \
174 sc0, sc0val, \
175 sc1, sc1val, \
176 sc2, sc2val, \
177 sc3, sc3val, \
178 sc4, sc4val, \
179 sc5, sc5val, \
180 sc6, sc6val, \
181 sc7, sc7val) \
182( \
183 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
184 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
185 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
186 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
187 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
188 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
189 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
190 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
191)
192
193#define DC_SC_VL_VAL( \
194 range, \
195 e0, e0val, \
196 e1, e1val, \
197 e2, e2val, \
198 e3, e3val, \
199 e4, e4val, \
200 e5, e5val, \
201 e6, e6val, \
202 e7, e7val, \
203 e8, e8val, \
204 e9, e9val, \
205 e10, e10val, \
206 e11, e11val, \
207 e12, e12val, \
208 e13, e13val, \
209 e14, e14val, \
210 e15, e15val) \
211( \
212 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
213 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
214 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
215 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
216 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
217 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
218 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
219 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
220 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
221 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
222 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
223 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
224 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
225 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
226 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
227 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
228)
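/*
 * Illustrative use of SC2VL_VAL (the SC->VL assignments shown here are
 * hypothetical, not the driver's power-on defaults): packing eight SC->VL
 * mappings into a single register image for SendSC2VLT<num>.
 *
 *   u64 reg = SC2VL_VAL(0,
 *                       0, 0, 1, 1,
 *                       2, 2, 3, 3,
 *                       4, 4, 5, 5,
 *                       6, 6, 7, 7);
 *
 * Each (sc, val) pair shifts the VL value to that SC's field of the
 * SEND_SC2VLT0 register, and the results are OR-ed into one u64 suitable
 * for write_csr().
 */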
229
230/* all CceStatus sub-block freeze bits */
231#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
232 | CCE_STATUS_RXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
235/* all CceStatus sub-block TXE pause bits */
236#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
237 | CCE_STATUS_TXE_PAUSED_SMASK \
238 | CCE_STATUS_SDMA_PAUSED_SMASK)
239/* all CceStatus sub-block RXE pause bits */
240#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
241
242/*
243 * CCE Error flags.
244 */
245static struct flag_table cce_err_status_flags[] = {
246/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
247 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
248/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
250/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
251 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
252/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
253 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
254/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
255 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
256/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
257 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
258/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
259 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
260/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
261 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
262/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
266/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
268/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
270/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
272/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
274/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
275 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
276/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
278/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
279 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
280/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
282/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
283 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
284/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
286/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
287 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
288/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
290/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
291 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
292/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
294/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
295 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
296/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
298/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
299 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
300/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
302/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
303 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
304/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
305 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
306/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
307 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
308/*31*/ FLAG_ENTRY0("LATriggered",
309 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
310/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
312/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
313 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
314/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
315 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
316/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
317 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
318/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
320/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
321 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
322/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
324/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
325 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
326/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
327 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
328/*41-63 reserved*/
329};
330
331/*
332 * Misc Error flags
333 */
334#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
335static struct flag_table misc_err_status_flags[] = {
336/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
337/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
338/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
339/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
340/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
341/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
342/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
343/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
344/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
345/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
346/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
347/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
348/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
349};
350
351/*
352 * TXE PIO Error flags and consequences
353 */
354static struct flag_table pio_err_status_flags[] = {
355/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
356 SEC_WRITE_DROPPED,
357 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
358/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
359 SEC_SPC_FREEZE,
360 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
361/* 2*/ FLAG_ENTRY("PioCsrParity",
362 SEC_SPC_FREEZE,
363 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
364/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
365 SEC_SPC_FREEZE,
366 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
367/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
368 SEC_SPC_FREEZE,
369 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
370/* 5*/ FLAG_ENTRY("PioPccFifoParity",
371 SEC_SPC_FREEZE,
372 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
373/* 6*/ FLAG_ENTRY("PioPecFifoParity",
374 SEC_SPC_FREEZE,
375 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
376/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
377 SEC_SPC_FREEZE,
378 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
379/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
380 SEC_SPC_FREEZE,
381 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
382/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
383 SEC_SPC_FREEZE,
384 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
385/*10*/ FLAG_ENTRY("PioSmPktResetParity",
386 SEC_SPC_FREEZE,
387 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
388/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
389 SEC_SPC_FREEZE,
390 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
391/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
392 SEC_SPC_FREEZE,
393 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
394/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
395 0,
396 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
397/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
398 0,
399 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
400/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
401 SEC_SPC_FREEZE,
402 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
403/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
404 SEC_SPC_FREEZE,
405 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
406/*17*/ FLAG_ENTRY("PioInitSmIn",
407 0,
408 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
409/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
410 SEC_SPC_FREEZE,
411 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
412/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
413 SEC_SPC_FREEZE,
414 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
415/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
416 0,
417 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
418/*21*/ FLAG_ENTRY("PioWriteDataParity",
419 SEC_SPC_FREEZE,
420 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
421/*22*/ FLAG_ENTRY("PioStateMachine",
422 SEC_SPC_FREEZE,
423 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
424/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
425 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
426 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
427/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
428 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
429 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
430/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
431 SEC_SPC_FREEZE,
432 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
433/*26*/ FLAG_ENTRY("PioVlfSopParity",
434 SEC_SPC_FREEZE,
435 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
436/*27*/ FLAG_ENTRY("PioVlFifoParity",
437 SEC_SPC_FREEZE,
438 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
439/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
440 SEC_SPC_FREEZE,
441 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
442/*29*/ FLAG_ENTRY("PioPpmcSopLen",
443 SEC_SPC_FREEZE,
444 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
445/*30-31 reserved*/
446/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
449/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
450 SEC_SPC_FREEZE,
451 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
452/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
453 SEC_SPC_FREEZE,
454 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
455/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
456 SEC_SPC_FREEZE,
457 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
458/*36-63 reserved*/
459};
460
461/* TXE PIO errors that cause an SPC freeze */
462#define ALL_PIO_FREEZE_ERR \
463 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
492
493/*
494 * TXE SDMA Error flags
495 */
496static struct flag_table sdma_err_status_flags[] = {
497/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
498 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
499/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
500 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
501/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
503/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
504 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
505/*04-63 reserved*/
506};
507
508/* TXE SDMA errors that cause an SPC freeze */
509#define ALL_SDMA_FREEZE_ERR \
510 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
512 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
513
514/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
515#define PORT_DISCARD_EGRESS_ERRS \
516 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
517 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
518 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
519
520/*
521 * TXE Egress Error flags
522 */
523#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
524static struct flag_table egress_err_status_flags[] = {
525/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
526/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
527/* 2 reserved */
528/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
529 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
530/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
531/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
532/* 6 reserved */
533/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
534 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
535/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
536 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
537/* 9-10 reserved */
538/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
539 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
540/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
541/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
542/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
543/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
544/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
545 SEES(TX_SDMA0_DISALLOWED_PACKET)),
546/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
547 SEES(TX_SDMA1_DISALLOWED_PACKET)),
548/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
549 SEES(TX_SDMA2_DISALLOWED_PACKET)),
550/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
551 SEES(TX_SDMA3_DISALLOWED_PACKET)),
552/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
553 SEES(TX_SDMA4_DISALLOWED_PACKET)),
554/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
555 SEES(TX_SDMA5_DISALLOWED_PACKET)),
556/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
557 SEES(TX_SDMA6_DISALLOWED_PACKET)),
558/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
559 SEES(TX_SDMA7_DISALLOWED_PACKET)),
560/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
561 SEES(TX_SDMA8_DISALLOWED_PACKET)),
562/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
563 SEES(TX_SDMA9_DISALLOWED_PACKET)),
564/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
565 SEES(TX_SDMA10_DISALLOWED_PACKET)),
566/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
567 SEES(TX_SDMA11_DISALLOWED_PACKET)),
568/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
569 SEES(TX_SDMA12_DISALLOWED_PACKET)),
570/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
571 SEES(TX_SDMA13_DISALLOWED_PACKET)),
572/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
573 SEES(TX_SDMA14_DISALLOWED_PACKET)),
574/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
575 SEES(TX_SDMA15_DISALLOWED_PACKET)),
576/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
578/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
580/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
582/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
584/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
586/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
588/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
590/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
592/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
594/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
595/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
596/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
597/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
598/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
599/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
600/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
601/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
602/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
603/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
604/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
605/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
606/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
607/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
608/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
609/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
610/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
611/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
612/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
613/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
614/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
615/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
616 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
617/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
618 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
619};
620
621/*
622 * TXE Egress Error Info flags
623 */
624#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
625static struct flag_table egress_err_info_flags[] = {
626/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
627/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
628/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
629/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
630/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
631/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
632/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
633/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
634/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
635/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
636/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
637/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
638/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
639/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
640/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
641/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
642/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
643/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
644/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
645/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
646/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
647/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
648};
649
650/* TXE Egress errors that cause an SPC freeze */
651#define ALL_TXE_EGRESS_FREEZE_ERR \
652 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
653 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
654 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
655 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
656 | SEES(TX_LAUNCH_CSR_PARITY) \
657 | SEES(TX_SBRD_CTL_CSR_PARITY) \
658 | SEES(TX_CONFIG_PARITY) \
659 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
661 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
662 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
663 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
668 | SEES(TX_CREDIT_RETURN_PARITY))
669
670/*
671 * TXE Send error flags
672 */
673#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
674static struct flag_table send_err_status_flags[] = {
675/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
676/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
677/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
678};
679
680/*
681 * TXE Send Context Error flags and consequences
682 */
683static struct flag_table sc_err_status_flags[] = {
684/* 0*/ FLAG_ENTRY("InconsistentSop",
685 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
687/* 1*/ FLAG_ENTRY("DisallowedPacket",
688 SEC_PACKET_DROPPED | SEC_SC_HALTED,
689 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
690/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
691 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
693/* 3*/ FLAG_ENTRY("WriteOverflow",
694 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
696/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
697 SEC_WRITE_DROPPED | SEC_SC_HALTED,
698 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
699/* 5-63 reserved*/
700};
701
702/*
703 * RXE Receive Error flags
704 */
705#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
706static struct flag_table rxe_err_status_flags[] = {
707/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
708/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
709/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
710/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
711/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
712/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
713/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
714/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
715/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
716/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
717/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
718/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
719/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
720/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
721/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
722/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
723/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
724 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
725/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
726/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
727/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
728 RXES(RBUF_BLOCK_LIST_READ_UNC)),
729/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
730 RXES(RBUF_BLOCK_LIST_READ_COR)),
731/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
732 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
733/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
734 RXES(RBUF_CSR_QENT_CNT_PARITY)),
735/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
736 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
737/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
738 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
739/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
740/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
741/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
742 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
743/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
744/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
745/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
746/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
747/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
748/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
749/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
750/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
751 RXES(RBUF_FL_INITDONE_PARITY)),
752/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
753 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
754/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
755/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
756/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
757/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
758 RXES(LOOKUP_DES_PART1_UNC_COR)),
759/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
760 RXES(LOOKUP_DES_PART2_PARITY)),
761/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
762/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
763/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
764/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
765/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
766/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
767/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
768/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
769/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
770/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
771/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
772/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
773/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
774/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
775/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
776/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
777/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
778/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
779/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
780/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
781/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
782/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
783};
784
785/* RXE errors that will trigger an SPC freeze */
786#define ALL_RXE_FREEZE_ERR \
787 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
831
832#define RXE_FREEZE_ABORT_MASK \
833 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
834 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
835 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
836
837/*
838 * DCC Error Flags
839 */
840#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
841static struct flag_table dcc_err_flags[] = {
842 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
843 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
844 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
845 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
846 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
847 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
848 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
849 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
850 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
851 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
852 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
853 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
854 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
855 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
856 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
857 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
858 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
859 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
860 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
861 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
862 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
863 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
864 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
865 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
866 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
867 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
868 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
869 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
870 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
871 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
872 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
873 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
874 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
875 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
876 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
877 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
878 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
879 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
880 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
881 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
882 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
883 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
884 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
885 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
886 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
887 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
888};
889
890/*
891 * LCB error flags
892 */
893#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
894static struct flag_table lcb_err_flags[] = {
895/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
896/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
897/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
898/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
899 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
900/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
901/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
902/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
903/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
904/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
905/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
906/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
907/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
908/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
909/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
910 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
911/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
912/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
913/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
914/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
915/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
916/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
917 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
918/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
919/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
920/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
921/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
922/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
923/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
924/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
925 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
926/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
927/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
928 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
929/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
930 LCBE(REDUNDANT_FLIT_PARITY_ERR))
931};
932
933/*
934 * DC8051 Error Flags
935 */
936#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
937static struct flag_table dc8051_err_flags[] = {
938 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
939 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
940 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
941 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
942 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
943 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
944 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
945 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
946 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
947 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
948 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
949};
950
951/*
952 * DC8051 Information Error flags
953 *
954 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
955 */
956static struct flag_table dc8051_info_err_flags[] = {
957 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
958 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
959 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
960 FLAG_ENTRY0("Serdes internal loopback failure",
961 FAILED_SERDES_INTERNAL_LOOPBACK),
962 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
963 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
964 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
965 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
966 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
967 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
968 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
969 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
970};
971
972/*
973 * DC8051 Information Host Information flags
974 *
975 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
976 */
977static struct flag_table dc8051_info_host_msg_flags[] = {
978 FLAG_ENTRY0("Host request done", 0x0001),
979 FLAG_ENTRY0("BC SMA message", 0x0002),
980 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
981 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
982 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
983 FLAG_ENTRY0("External device config request", 0x0020),
984 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
985 FLAG_ENTRY0("LinkUp achieved", 0x0080),
986 FLAG_ENTRY0("Link going down", 0x0100),
987};
988
989static u32 encoded_size(u32 size);
990static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
991static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
992static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
993 u8 *continuous);
994static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
995 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
996static void read_vc_remote_link_width(struct hfi1_devdata *dd,
997 u8 *remote_tx_rate, u16 *link_widths);
998static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
999 u8 *flag_bits, u16 *link_widths);
1000static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1001 u8 *device_rev);
1002static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1003static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1004static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1005 u8 *tx_polarity_inversion,
1006 u8 *rx_polarity_inversion, u8 *max_rate);
1007static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1008 unsigned int context, u64 err_status);
1009static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1010static void handle_dcc_err(struct hfi1_devdata *dd,
1011 unsigned int context, u64 err_status);
1012static void handle_lcb_err(struct hfi1_devdata *dd,
1013 unsigned int context, u64 err_status);
1014static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1020static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1021static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void set_partition_keys(struct hfi1_pportdata *);
1023static const char *link_state_name(u32 state);
1024static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1025 u32 state);
1026static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1027 u64 *out_data);
1028static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1029static int thermal_init(struct hfi1_devdata *dd);
1030
1031static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1032 int msecs);
1033static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1034static void handle_temp_err(struct hfi1_devdata *);
1035static void dc_shutdown(struct hfi1_devdata *);
1036static void dc_start(struct hfi1_devdata *);
1037
1038/*
1039 * Error interrupt table entry. This is used as input to the interrupt
1040 * "clear down" routine used for all second tier error interrupt registers.
1041 * Second tier interrupt registers have a single bit representing them
1042 * in the top-level CceIntStatus.
1043 */
1044struct err_reg_info {
1045 u32 status; /* status CSR offset */
1046 u32 clear; /* clear CSR offset */
1047 u32 mask; /* mask CSR offset */
1048 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1049 const char *desc;
1050};
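/*
 * Illustrative sketch of the "clear down" pattern described above (compiled
 * out; the routine name is hypothetical and the driver's actual
 * implementation appears later in this file): read the second-tier status
 * CSR, acknowledge the set bits through the clear CSR, then hand the value
 * to the per-register handler.
 */
#if 0
static void example_clear_down(struct hfi1_devdata *dd, u32 source,
			       const struct err_reg_info *eri)
{
	u64 reg = read_csr(dd, eri->status);

	write_csr(dd, eri->clear, reg);		/* ack everything we saw */
	if (eri->handler)
		eri->handler(dd, source, reg);
}
#endif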
1051
1052#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1053#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1054#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1055
1056/*
1057 * Helpers for building HFI and DC error interrupt table entries. Different
1058 * helpers are needed because of inconsistent register names.
1059 */
1060#define EE(reg, handler, desc) \
1061 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1062 handler, desc }
1063#define DC_EE1(reg, handler, desc) \
1064 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1065#define DC_EE2(reg, handler, desc) \
1066 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
1067
1068/*
1069 * Table of the "misc" grouping of error interrupts. Each entry refers to
1070 * another register containing more information.
1071 */
1072static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1073/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1074/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1075/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1076/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1077/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1078/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1079/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1080/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1081 /* the rest are reserved */
1082};
1083
1084/*
1085 * Index into the Various section of the interrupt sources
1086 * corresponding to the Critical Temperature interrupt.
1087 */
1088#define TCRIT_INT_SOURCE 4
1089
1090/*
1091 * SDMA error interrupt entry - refers to another register containing more
1092 * information.
1093 */
1094static const struct err_reg_info sdma_eng_err =
1095 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1096
1097static const struct err_reg_info various_err[NUM_VARIOUS] = {
1098/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1099/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1100/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1101/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1102/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1103 /* rest are reserved */
1104};
1105
1106/*
1107 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1108 * register can not be derived from the MTU value because 10K is not
1109 * a power of 2. Therefore, we need a constant. Everything else can
1110 * be calculated.
1111 */
1112#define DCC_CFG_PORT_MTU_CAP_10240 7
1113
1114/*
1115 * Table of the DC grouping of error interrupts. Each entry refers to
1116 * another register containing more information.
1117 */
1118static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1119/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1120/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1121/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1122/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1123 /* the rest are reserved */
1124};
1125
1126struct cntr_entry {
1127 /*
1128 * counter name
1129 */
1130 char *name;
1131
1132 /*
1133 * csr to read for name (if applicable)
1134 */
1135 u64 csr;
1136
1137 /*
1138 * offset into dd or ppd to store the counter's value
1139 */
1140 int offset;
1141
1142 /*
1143 * flags
1144 */
1145 u8 flags;
1146
1147 /*
1148 * accessor for stat element, context either dd or ppd
1149 */
1150 u64 (*rw_cntr)(const struct cntr_entry *,
1151 void *context,
1152 int vl,
1153 int mode,
1154 u64 data);
1155};
1156
1157#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1158#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1159
1160#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1161{ \
1162 name, \
1163 csr, \
1164 offset, \
1165 flags, \
1166 accessor \
1167}
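/*
 * Illustrative sketch (compiled out, helper name hypothetical): a counter
 * described by a cntr_entry is read by calling its rw_cntr accessor with
 * the owning dd or ppd as context, a VL index (or CNTR_INVALID_VL for
 * non-per-VL counters), and the access mode.
 */
#if 0
static u64 example_read_cntr(const struct cntr_entry *entry, void *context,
			     int vl)
{
	return entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
}
#endif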
1168
1169/* 32bit RXE */
1170#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1171CNTR_ELEM(#name, \
1172 (counter * 8 + RCV_COUNTER_ARRAY32), \
1173 0, flags | CNTR_32BIT, \
1174 port_access_u32_csr)
1175
1176#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY32), \
1179 0, flags | CNTR_32BIT, \
1180 dev_access_u32_csr)
1181
1182/* 64bit RXE */
1183#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1184CNTR_ELEM(#name, \
1185 (counter * 8 + RCV_COUNTER_ARRAY64), \
1186 0, flags, \
1187 port_access_u64_csr)
1188
1189#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1190CNTR_ELEM(#name, \
1191 (counter * 8 + RCV_COUNTER_ARRAY64), \
1192 0, flags, \
1193 dev_access_u64_csr)
1194
1195#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1196#define OVR_ELM(ctx) \
1197CNTR_ELEM("RcvHdrOvr" #ctx, \
1198 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1199 0, CNTR_NORMAL, port_access_u64_csr)
1200
1201/* 32bit TXE */
1202#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY32), \
1205 0, flags | CNTR_32BIT, \
1206 port_access_u32_csr)
1207
1208/* 64bit TXE */
1209#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1210CNTR_ELEM(#name, \
1211 (counter * 8 + SEND_COUNTER_ARRAY64), \
1212 0, flags, \
1213 port_access_u64_csr)
1214
1215#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1216CNTR_ELEM(#name,\
1217 counter * 8 + SEND_COUNTER_ARRAY64, \
1218 0, \
1219 flags, \
1220 dev_access_u64_csr)
1221
1222/* CCE */
1223#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225 (counter * 8 + CCE_COUNTER_ARRAY32), \
1226 0, flags | CNTR_32BIT, \
1227 dev_access_u32_csr)
1228
1229#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1232 0, flags | CNTR_32BIT, \
1233 dev_access_u32_csr)
1234
1235/* DC */
1236#define DC_PERF_CNTR(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dev_access_u64_csr)
1242
1243#define DC_PERF_CNTR_LCB(name, counter, flags) \
1244CNTR_ELEM(#name, \
1245 counter, \
1246 0, \
1247 flags, \
1248 dc_access_lcb_cntr)
1249
1250/* ibp counters */
1251#define SW_IBP_CNTR(name, cntr) \
1252CNTR_ELEM(#name, \
1253 0, \
1254 0, \
1255 CNTR_SYNTH, \
1256 access_ibp_##cntr)
1257
1258u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1259{
1260 u64 val;
1261
1262 if (dd->flags & HFI1_PRESENT) {
1263 val = readq((void __iomem *)dd->kregbase + offset);
1264 return val;
1265 }
1266 return -1;
1267}
1268
1269void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1270{
1271 if (dd->flags & HFI1_PRESENT)
1272 writeq(value, (void __iomem *)dd->kregbase + offset);
1273}
1274
1275void __iomem *get_csr_addr(
1276 struct hfi1_devdata *dd,
1277 u32 offset)
1278{
1279 return (void __iomem *)dd->kregbase + offset;
1280}
1281
1282static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1283 int mode, u64 value)
1284{
1285 u64 ret;
1286
1287 if (mode == CNTR_MODE_R) {
1288 ret = read_csr(dd, csr);
1289 } else if (mode == CNTR_MODE_W) {
1290 write_csr(dd, csr, value);
1291 ret = value;
1292 } else {
1293 dd_dev_err(dd, "Invalid cntr register access mode");
1294 return 0;
1295 }
1296
1297 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1298 return ret;
1299}
1300
1301/* Dev Access */
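/*
 * dev_access_u32_csr(): for counters flagged CNTR_SDMA the "vl" argument
 * carries the SDMA engine index and the per-engine CSR copies are 0x100
 * bytes apart; otherwise only CNTR_INVALID_VL is accepted.
 */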
1302static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1303 void *context, int vl, int mode, u64 data)
1304{
1305	struct hfi1_devdata *dd = context;
1306	u64 csr = entry->csr;
1307
1308	if (entry->flags & CNTR_SDMA) {
1309 if (vl == CNTR_INVALID_VL)
1310 return 0;
1311 csr += 0x100 * vl;
1312 } else {
1313 if (vl != CNTR_INVALID_VL)
1314 return 0;
1315 }
1316 return read_write_csr(dd, csr, mode, data);
1317}
1318
1319static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1320 void *context, int idx, int mode, u64 data)
1321{
1322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1323
1324 if (dd->per_sdma && idx < dd->num_sdma)
1325 return dd->per_sdma[idx].err_cnt;
1326 return 0;
1327}
1328
1329static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1330 void *context, int idx, int mode, u64 data)
1331{
1332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1333
1334 if (dd->per_sdma && idx < dd->num_sdma)
1335 return dd->per_sdma[idx].sdma_int_cnt;
1336 return 0;
1337}
1338
1339static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1340 void *context, int idx, int mode, u64 data)
1341{
1342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1343
1344 if (dd->per_sdma && idx < dd->num_sdma)
1345 return dd->per_sdma[idx].idle_int_cnt;
1346 return 0;
1347}
1348
1349static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1350 void *context, int idx, int mode,
1351 u64 data)
1352{
1353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1354
1355 if (dd->per_sdma && idx < dd->num_sdma)
1356 return dd->per_sdma[idx].progress_int_cnt;
1357 return 0;
1358}
1359
1360static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1361 int vl, int mode, u64 data)
1362{
1363	struct hfi1_devdata *dd = context;
1364
1365 u64 val = 0;
1366 u64 csr = entry->csr;
1367
1368 if (entry->flags & CNTR_VL) {
1369 if (vl == CNTR_INVALID_VL)
1370 return 0;
1371 csr += 8 * vl;
1372 } else {
1373 if (vl != CNTR_INVALID_VL)
1374 return 0;
1375 }
1376
1377 val = read_write_csr(dd, csr, mode, data);
1378 return val;
1379}
1380
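/*
 * DC LCB counters are reached via read_lcb_csr()/write_lcb_csr(), which
 * can fail while the LCB is not accessible; a failed access is logged
 * and the counter reads back as 0.
 */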
1381static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1382 int vl, int mode, u64 data)
1383{
1384	struct hfi1_devdata *dd = context;
1385	u32 csr = entry->csr;
1386 int ret = 0;
1387
1388 if (vl != CNTR_INVALID_VL)
1389 return 0;
1390 if (mode == CNTR_MODE_R)
1391 ret = read_lcb_csr(dd, csr, &data);
1392 else if (mode == CNTR_MODE_W)
1393 ret = write_lcb_csr(dd, csr, data);
1394
1395 if (ret) {
1396 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1397 return 0;
1398 }
1399
1400 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1401 return data;
1402}
1403
1404/* Port Access */
1405static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1406 int vl, int mode, u64 data)
1407{
1408	struct hfi1_pportdata *ppd = context;
1409
1410 if (vl != CNTR_INVALID_VL)
1411 return 0;
1412 return read_write_csr(ppd->dd, entry->csr, mode, data);
1413}
1414
1415static u64 port_access_u64_csr(const struct cntr_entry *entry,
1416 void *context, int vl, int mode, u64 data)
1417{
1418	struct hfi1_pportdata *ppd = context;
1419	u64 val;
1420 u64 csr = entry->csr;
1421
1422 if (entry->flags & CNTR_VL) {
1423 if (vl == CNTR_INVALID_VL)
1424 return 0;
1425 csr += 8 * vl;
1426 } else {
1427 if (vl != CNTR_INVALID_VL)
1428 return 0;
1429 }
1430 val = read_write_csr(ppd->dd, csr, mode, data);
1431 return val;
1432}
1433
1434/* Software defined */
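/*
 * Helper for counters kept as plain u64 fields in driver data structures
 * rather than in hardware CSRs; the mode handling mirrors
 * read_write_csr() above.
 */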
1435static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1436 u64 data)
1437{
1438 u64 ret;
1439
1440 if (mode == CNTR_MODE_R) {
1441 ret = *cntr;
1442 } else if (mode == CNTR_MODE_W) {
1443 *cntr = data;
1444 ret = data;
1445 } else {
1446 dd_dev_err(dd, "Invalid cntr sw access mode");
1447 return 0;
1448 }
1449
1450 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1451
1452 return ret;
1453}
1454
1455static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1456 int vl, int mode, u64 data)
1457{
1458	struct hfi1_pportdata *ppd = context;
1459
1460 if (vl != CNTR_INVALID_VL)
1461 return 0;
1462 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1463}
1464
1465static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1466 int vl, int mode, u64 data)
1467{
1468	struct hfi1_pportdata *ppd = context;
1469
1470 if (vl != CNTR_INVALID_VL)
1471 return 0;
1472 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1473}
1474
1475static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1476 void *context, int vl, int mode,
1477 u64 data)
1478{
1479 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1480
1481 if (vl != CNTR_INVALID_VL)
1482 return 0;
1483 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1484}
1485
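/*
 * Transmit discards are tracked per-VL as well as in a port-wide
 * aggregate; CNTR_INVALID_VL selects the aggregate, and an out-of-range
 * VL reads (and writes) a throwaway zero.
 */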
1486static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1487 void *context, int vl, int mode, u64 data)
1488{
1489	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1490 u64 zero = 0;
1491 u64 *counter;
1492
1493	if (vl == CNTR_INVALID_VL)
1494 counter = &ppd->port_xmit_discards;
1495 else if (vl >= 0 && vl < C_VL_COUNT)
1496 counter = &ppd->port_xmit_discards_vl[vl];
1497 else
1498 counter = &zero;
1499
1500	return read_write_sw(ppd->dd, counter, mode, data);
1501}
1502
1503static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1504 void *context, int vl, int mode, u64 data)
1505{
1506	struct hfi1_pportdata *ppd = context;
1507
1508 if (vl != CNTR_INVALID_VL)
1509 return 0;
1510
1511 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1512 mode, data);
1513}
1514
1515static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1516 void *context, int vl, int mode, u64 data)
1517{
1518	struct hfi1_pportdata *ppd = context;
1519
1520 if (vl != CNTR_INVALID_VL)
1521 return 0;
1522
1523 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1524 mode, data);
1525}
1526
1527u64 get_all_cpu_total(u64 __percpu *cntr)
1528{
1529 int cpu;
1530 u64 counter = 0;
1531
1532 for_each_possible_cpu(cpu)
1533 counter += *per_cpu_ptr(cntr, cpu);
1534 return counter;
1535}
1536
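/*
 * Per-CPU counters are never actually cleared.  "Zeroing" one (the only
 * write allowed, data == 0) records the current total in *z_val, and a
 * read returns the running total minus that baseline.
 */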
1537static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1538 u64 __percpu *cntr,
1539 int vl, int mode, u64 data)
1540{
1541	u64 ret = 0;
1542
1543 if (vl != CNTR_INVALID_VL)
1544 return 0;
1545
1546 if (mode == CNTR_MODE_R) {
1547 ret = get_all_cpu_total(cntr) - *z_val;
1548 } else if (mode == CNTR_MODE_W) {
1549 /* A write can only zero the counter */
1550 if (data == 0)
1551 *z_val = get_all_cpu_total(cntr);
1552 else
1553 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1554 } else {
1555 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1556 return 0;
1557 }
1558
1559 return ret;
1560}
1561
1562static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1563 void *context, int vl, int mode, u64 data)
1564{
1565	struct hfi1_devdata *dd = context;
1566
1567 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1568 mode, data);
1569}
1570
1571static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1572 void *context, int vl, int mode, u64 data)
1573{
1574	struct hfi1_devdata *dd = context;
1575
1576 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1577 mode, data);
1578}
1579
1580static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1581 void *context, int vl, int mode, u64 data)
1582{
1583	struct hfi1_devdata *dd = context;
1584
1585 return dd->verbs_dev.n_piowait;
1586}
1587
1588static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1589 void *context, int vl, int mode, u64 data)
1590{
1591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1592
1593 return dd->verbs_dev.n_piodrain;
1594}
1595
1596static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1597 void *context, int vl, int mode, u64 data)
1598{
1599	struct hfi1_devdata *dd = context;
1600
1601 return dd->verbs_dev.n_txwait;
1602}
1603
1604static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1605 void *context, int vl, int mode, u64 data)
1606{
1607	struct hfi1_devdata *dd = context;
1608
1609 return dd->verbs_dev.n_kmem_wait;
1610}
1611
1612static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1613 void *context, int vl, int mode, u64 data)
1614{
1615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1616
1617	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1618 mode, data);
1619}
1620
1621/* Software counters for the error status bits within MISC_ERR_STATUS */
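/*
 * Editor's note: each accessor below reports the software count kept for
 * one bit of the corresponding hardware error-status register; the array
 * index appears to track the bit position of the error in that register
 * (the PLL lock fail accessor, for example, returns
 * misc_err_status_cnt[12]).  The CceErrStatus, RcvErrStatus,
 * SendPioErrStatus, SendDmaErrStatus and SendEgressErrStatus blocks that
 * follow use the same pattern.
 */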
1622static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1623 void *context, int vl, int mode,
1624 u64 data)
1625{
1626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1627
1628 return dd->misc_err_status_cnt[12];
1629}
1630
1631static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1632 void *context, int vl, int mode,
1633 u64 data)
1634{
1635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1636
1637 return dd->misc_err_status_cnt[11];
1638}
1639
1640static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1641 void *context, int vl, int mode,
1642 u64 data)
1643{
1644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1645
1646 return dd->misc_err_status_cnt[10];
1647}
1648
1649static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1650 void *context, int vl,
1651 int mode, u64 data)
1652{
1653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1654
1655 return dd->misc_err_status_cnt[9];
1656}
1657
1658static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1659 void *context, int vl, int mode,
1660 u64 data)
1661{
1662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1663
1664 return dd->misc_err_status_cnt[8];
1665}
1666
1667static u64 access_misc_efuse_read_bad_addr_err_cnt(
1668 const struct cntr_entry *entry,
1669 void *context, int vl, int mode, u64 data)
1670{
1671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1672
1673 return dd->misc_err_status_cnt[7];
1674}
1675
1676static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1677 void *context, int vl,
1678 int mode, u64 data)
1679{
1680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1681
1682 return dd->misc_err_status_cnt[6];
1683}
1684
1685static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1686 void *context, int vl, int mode,
1687 u64 data)
1688{
1689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1690
1691 return dd->misc_err_status_cnt[5];
1692}
1693
1694static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1695 void *context, int vl, int mode,
1696 u64 data)
1697{
1698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1699
1700 return dd->misc_err_status_cnt[4];
1701}
1702
1703static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1704 void *context, int vl,
1705 int mode, u64 data)
1706{
1707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709 return dd->misc_err_status_cnt[3];
1710}
1711
1712static u64 access_misc_csr_write_bad_addr_err_cnt(
1713 const struct cntr_entry *entry,
1714 void *context, int vl, int mode, u64 data)
1715{
1716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1717
1718 return dd->misc_err_status_cnt[2];
1719}
1720
1721static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1722 void *context, int vl,
1723 int mode, u64 data)
1724{
1725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1726
1727 return dd->misc_err_status_cnt[1];
1728}
1729
1730static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1731 void *context, int vl, int mode,
1732 u64 data)
1733{
1734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1735
1736 return dd->misc_err_status_cnt[0];
1737}
1738
1739/*
1740 * Software counter for the aggregate of
1741 * individual CceErrStatus counters
1742 */
1743static u64 access_sw_cce_err_status_aggregated_cnt(
1744 const struct cntr_entry *entry,
1745 void *context, int vl, int mode, u64 data)
1746{
1747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1748
1749 return dd->sw_cce_err_status_aggregate;
1750}
1751
1752/*
1753 * Software counters corresponding to each of the
1754 * error status bits within CceErrStatus
1755 */
1756static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1757 void *context, int vl, int mode,
1758 u64 data)
1759{
1760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1761
1762 return dd->cce_err_status_cnt[40];
1763}
1764
1765static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1766 void *context, int vl, int mode,
1767 u64 data)
1768{
1769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1770
1771 return dd->cce_err_status_cnt[39];
1772}
1773
1774static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1775 void *context, int vl, int mode,
1776 u64 data)
1777{
1778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1779
1780 return dd->cce_err_status_cnt[38];
1781}
1782
1783static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1784 void *context, int vl, int mode,
1785 u64 data)
1786{
1787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1788
1789 return dd->cce_err_status_cnt[37];
1790}
1791
1792static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1793 void *context, int vl, int mode,
1794 u64 data)
1795{
1796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1797
1798 return dd->cce_err_status_cnt[36];
1799}
1800
1801static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1802 const struct cntr_entry *entry,
1803 void *context, int vl, int mode, u64 data)
1804{
1805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1806
1807 return dd->cce_err_status_cnt[35];
1808}
1809
1810static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1811 const struct cntr_entry *entry,
1812 void *context, int vl, int mode, u64 data)
1813{
1814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1815
1816 return dd->cce_err_status_cnt[34];
1817}
1818
1819static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1820 void *context, int vl,
1821 int mode, u64 data)
1822{
1823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1824
1825 return dd->cce_err_status_cnt[33];
1826}
1827
1828static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1829 void *context, int vl, int mode,
1830 u64 data)
1831{
1832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1833
1834 return dd->cce_err_status_cnt[32];
1835}
1836
1837static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1838 void *context, int vl, int mode, u64 data)
1839{
1840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1841
1842 return dd->cce_err_status_cnt[31];
1843}
1844
1845static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1846 void *context, int vl, int mode,
1847 u64 data)
1848{
1849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1850
1851 return dd->cce_err_status_cnt[30];
1852}
1853
1854static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1855 void *context, int vl, int mode,
1856 u64 data)
1857{
1858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1859
1860 return dd->cce_err_status_cnt[29];
1861}
1862
1863static u64 access_pcic_transmit_back_parity_err_cnt(
1864 const struct cntr_entry *entry,
1865 void *context, int vl, int mode, u64 data)
1866{
1867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1868
1869 return dd->cce_err_status_cnt[28];
1870}
1871
1872static u64 access_pcic_transmit_front_parity_err_cnt(
1873 const struct cntr_entry *entry,
1874 void *context, int vl, int mode, u64 data)
1875{
1876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1877
1878 return dd->cce_err_status_cnt[27];
1879}
1880
1881static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1882 void *context, int vl, int mode,
1883 u64 data)
1884{
1885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1886
1887 return dd->cce_err_status_cnt[26];
1888}
1889
1890static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1891 void *context, int vl, int mode,
1892 u64 data)
1893{
1894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1895
1896 return dd->cce_err_status_cnt[25];
1897}
1898
1899static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1900 void *context, int vl, int mode,
1901 u64 data)
1902{
1903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1904
1905 return dd->cce_err_status_cnt[24];
1906}
1907
1908static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1909 void *context, int vl, int mode,
1910 u64 data)
1911{
1912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1913
1914 return dd->cce_err_status_cnt[23];
1915}
1916
1917static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1918 void *context, int vl,
1919 int mode, u64 data)
1920{
1921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1922
1923 return dd->cce_err_status_cnt[22];
1924}
1925
1926static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1927 void *context, int vl, int mode,
1928 u64 data)
1929{
1930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1931
1932 return dd->cce_err_status_cnt[21];
1933}
1934
1935static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1936 const struct cntr_entry *entry,
1937 void *context, int vl, int mode, u64 data)
1938{
1939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1940
1941 return dd->cce_err_status_cnt[20];
1942}
1943
1944static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1945 void *context, int vl,
1946 int mode, u64 data)
1947{
1948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1949
1950 return dd->cce_err_status_cnt[19];
1951}
1952
1953static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1954 void *context, int vl, int mode,
1955 u64 data)
1956{
1957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1958
1959 return dd->cce_err_status_cnt[18];
1960}
1961
1962static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1963 void *context, int vl, int mode,
1964 u64 data)
1965{
1966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1967
1968 return dd->cce_err_status_cnt[17];
1969}
1970
1971static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1972 void *context, int vl, int mode,
1973 u64 data)
1974{
1975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1976
1977 return dd->cce_err_status_cnt[16];
1978}
1979
1980static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1981 void *context, int vl, int mode,
1982 u64 data)
1983{
1984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1985
1986 return dd->cce_err_status_cnt[15];
1987}
1988
1989static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1990 void *context, int vl,
1991 int mode, u64 data)
1992{
1993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1994
1995 return dd->cce_err_status_cnt[14];
1996}
1997
1998static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1999 void *context, int vl, int mode,
2000 u64 data)
2001{
2002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2003
2004 return dd->cce_err_status_cnt[13];
2005}
2006
2007static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2008 const struct cntr_entry *entry,
2009 void *context, int vl, int mode, u64 data)
2010{
2011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2012
2013 return dd->cce_err_status_cnt[12];
2014}
2015
2016static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2017 const struct cntr_entry *entry,
2018 void *context, int vl, int mode, u64 data)
2019{
2020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2021
2022 return dd->cce_err_status_cnt[11];
2023}
2024
2025static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2026 const struct cntr_entry *entry,
2027 void *context, int vl, int mode, u64 data)
2028{
2029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2030
2031 return dd->cce_err_status_cnt[10];
2032}
2033
2034static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2035 const struct cntr_entry *entry,
2036 void *context, int vl, int mode, u64 data)
2037{
2038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2039
2040 return dd->cce_err_status_cnt[9];
2041}
2042
2043static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2044 const struct cntr_entry *entry,
2045 void *context, int vl, int mode, u64 data)
2046{
2047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2048
2049 return dd->cce_err_status_cnt[8];
2050}
2051
2052static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2053 void *context, int vl,
2054 int mode, u64 data)
2055{
2056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2057
2058 return dd->cce_err_status_cnt[7];
2059}
2060
2061static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2062 const struct cntr_entry *entry,
2063 void *context, int vl, int mode, u64 data)
2064{
2065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2066
2067 return dd->cce_err_status_cnt[6];
2068}
2069
2070static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2071 void *context, int vl, int mode,
2072 u64 data)
2073{
2074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2075
2076 return dd->cce_err_status_cnt[5];
2077}
2078
2079static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2080 void *context, int vl, int mode,
2081 u64 data)
2082{
2083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2084
2085 return dd->cce_err_status_cnt[4];
2086}
2087
2088static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2089 const struct cntr_entry *entry,
2090 void *context, int vl, int mode, u64 data)
2091{
2092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2093
2094 return dd->cce_err_status_cnt[3];
2095}
2096
2097static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2098 void *context, int vl,
2099 int mode, u64 data)
2100{
2101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2102
2103 return dd->cce_err_status_cnt[2];
2104}
2105
2106static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2107 void *context, int vl,
2108 int mode, u64 data)
2109{
2110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2111
2112 return dd->cce_err_status_cnt[1];
2113}
2114
2115static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2116 void *context, int vl, int mode,
2117 u64 data)
2118{
2119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2120
2121 return dd->cce_err_status_cnt[0];
2122}
2123
2124/*
2125 * Software counters corresponding to each of the
2126 * error status bits within RcvErrStatus
2127 */
2128static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2129 void *context, int vl, int mode,
2130 u64 data)
2131{
2132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2133
2134 return dd->rcv_err_status_cnt[63];
2135}
2136
2137static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2138 void *context, int vl,
2139 int mode, u64 data)
2140{
2141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2142
2143 return dd->rcv_err_status_cnt[62];
2144}
2145
2146static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2147 void *context, int vl, int mode,
2148 u64 data)
2149{
2150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2151
2152 return dd->rcv_err_status_cnt[61];
2153}
2154
2155static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2156 void *context, int vl, int mode,
2157 u64 data)
2158{
2159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2160
2161 return dd->rcv_err_status_cnt[60];
2162}
2163
2164static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2165 void *context, int vl,
2166 int mode, u64 data)
2167{
2168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2169
2170 return dd->rcv_err_status_cnt[59];
2171}
2172
2173static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2174 void *context, int vl,
2175 int mode, u64 data)
2176{
2177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2178
2179 return dd->rcv_err_status_cnt[58];
2180}
2181
2182static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2183 void *context, int vl, int mode,
2184 u64 data)
2185{
2186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2187
2188 return dd->rcv_err_status_cnt[57];
2189}
2190
2191static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2192 void *context, int vl, int mode,
2193 u64 data)
2194{
2195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2196
2197 return dd->rcv_err_status_cnt[56];
2198}
2199
2200static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2201 void *context, int vl, int mode,
2202 u64 data)
2203{
2204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2205
2206 return dd->rcv_err_status_cnt[55];
2207}
2208
2209static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2210 const struct cntr_entry *entry,
2211 void *context, int vl, int mode, u64 data)
2212{
2213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2214
2215 return dd->rcv_err_status_cnt[54];
2216}
2217
2218static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2219 const struct cntr_entry *entry,
2220 void *context, int vl, int mode, u64 data)
2221{
2222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2223
2224 return dd->rcv_err_status_cnt[53];
2225}
2226
2227static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2228 void *context, int vl,
2229 int mode, u64 data)
2230{
2231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2232
2233 return dd->rcv_err_status_cnt[52];
2234}
2235
2236static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2237 void *context, int vl,
2238 int mode, u64 data)
2239{
2240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2241
2242 return dd->rcv_err_status_cnt[51];
2243}
2244
2245static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2246 void *context, int vl,
2247 int mode, u64 data)
2248{
2249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2250
2251 return dd->rcv_err_status_cnt[50];
2252}
2253
2254static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2255 void *context, int vl,
2256 int mode, u64 data)
2257{
2258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2259
2260 return dd->rcv_err_status_cnt[49];
2261}
2262
2263static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2264 void *context, int vl,
2265 int mode, u64 data)
2266{
2267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2268
2269 return dd->rcv_err_status_cnt[48];
2270}
2271
2272static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2273 void *context, int vl,
2274 int mode, u64 data)
2275{
2276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2277
2278 return dd->rcv_err_status_cnt[47];
2279}
2280
2281static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2282 void *context, int vl, int mode,
2283 u64 data)
2284{
2285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2286
2287 return dd->rcv_err_status_cnt[46];
2288}
2289
2290static u64 access_rx_hq_intr_csr_parity_err_cnt(
2291 const struct cntr_entry *entry,
2292 void *context, int vl, int mode, u64 data)
2293{
2294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2295
2296 return dd->rcv_err_status_cnt[45];
2297}
2298
2299static u64 access_rx_lookup_csr_parity_err_cnt(
2300 const struct cntr_entry *entry,
2301 void *context, int vl, int mode, u64 data)
2302{
2303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2304
2305 return dd->rcv_err_status_cnt[44];
2306}
2307
2308static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2309 const struct cntr_entry *entry,
2310 void *context, int vl, int mode, u64 data)
2311{
2312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2313
2314 return dd->rcv_err_status_cnt[43];
2315}
2316
2317static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2318 const struct cntr_entry *entry,
2319 void *context, int vl, int mode, u64 data)
2320{
2321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2322
2323 return dd->rcv_err_status_cnt[42];
2324}
2325
2326static u64 access_rx_lookup_des_part2_parity_err_cnt(
2327 const struct cntr_entry *entry,
2328 void *context, int vl, int mode, u64 data)
2329{
2330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2331
2332 return dd->rcv_err_status_cnt[41];
2333}
2334
2335static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2336 const struct cntr_entry *entry,
2337 void *context, int vl, int mode, u64 data)
2338{
2339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2340
2341 return dd->rcv_err_status_cnt[40];
2342}
2343
2344static u64 access_rx_lookup_des_part1_unc_err_cnt(
2345 const struct cntr_entry *entry,
2346 void *context, int vl, int mode, u64 data)
2347{
2348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2349
2350 return dd->rcv_err_status_cnt[39];
2351}
2352
2353static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2354 const struct cntr_entry *entry,
2355 void *context, int vl, int mode, u64 data)
2356{
2357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2358
2359 return dd->rcv_err_status_cnt[38];
2360}
2361
2362static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2363 const struct cntr_entry *entry,
2364 void *context, int vl, int mode, u64 data)
2365{
2366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2367
2368 return dd->rcv_err_status_cnt[37];
2369}
2370
2371static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2372 const struct cntr_entry *entry,
2373 void *context, int vl, int mode, u64 data)
2374{
2375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2376
2377 return dd->rcv_err_status_cnt[36];
2378}
2379
2380static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2381 const struct cntr_entry *entry,
2382 void *context, int vl, int mode, u64 data)
2383{
2384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2385
2386 return dd->rcv_err_status_cnt[35];
2387}
2388
2389static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2390 const struct cntr_entry *entry,
2391 void *context, int vl, int mode, u64 data)
2392{
2393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2394
2395 return dd->rcv_err_status_cnt[34];
2396}
2397
2398static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2399 const struct cntr_entry *entry,
2400 void *context, int vl, int mode, u64 data)
2401{
2402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2403
2404 return dd->rcv_err_status_cnt[33];
2405}
2406
2407static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2408 void *context, int vl, int mode,
2409 u64 data)
2410{
2411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2412
2413 return dd->rcv_err_status_cnt[32];
2414}
2415
2416static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2417 void *context, int vl, int mode,
2418 u64 data)
2419{
2420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2421
2422 return dd->rcv_err_status_cnt[31];
2423}
2424
2425static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2426 void *context, int vl, int mode,
2427 u64 data)
2428{
2429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2430
2431 return dd->rcv_err_status_cnt[30];
2432}
2433
2434static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2435 void *context, int vl, int mode,
2436 u64 data)
2437{
2438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2439
2440 return dd->rcv_err_status_cnt[29];
2441}
2442
2443static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2444 void *context, int vl,
2445 int mode, u64 data)
2446{
2447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2448
2449 return dd->rcv_err_status_cnt[28];
2450}
2451
2452static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2453 const struct cntr_entry *entry,
2454 void *context, int vl, int mode, u64 data)
2455{
2456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2457
2458 return dd->rcv_err_status_cnt[27];
2459}
2460
2461static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2462 const struct cntr_entry *entry,
2463 void *context, int vl, int mode, u64 data)
2464{
2465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2466
2467 return dd->rcv_err_status_cnt[26];
2468}
2469
2470static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2471 const struct cntr_entry *entry,
2472 void *context, int vl, int mode, u64 data)
2473{
2474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2475
2476 return dd->rcv_err_status_cnt[25];
2477}
2478
2479static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2480 const struct cntr_entry *entry,
2481 void *context, int vl, int mode, u64 data)
2482{
2483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2484
2485 return dd->rcv_err_status_cnt[24];
2486}
2487
2488static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2489 const struct cntr_entry *entry,
2490 void *context, int vl, int mode, u64 data)
2491{
2492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2493
2494 return dd->rcv_err_status_cnt[23];
2495}
2496
2497static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2498 const struct cntr_entry *entry,
2499 void *context, int vl, int mode, u64 data)
2500{
2501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2502
2503 return dd->rcv_err_status_cnt[22];
2504}
2505
2506static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2507 const struct cntr_entry *entry,
2508 void *context, int vl, int mode, u64 data)
2509{
2510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2511
2512 return dd->rcv_err_status_cnt[21];
2513}
2514
2515static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2516 const struct cntr_entry *entry,
2517 void *context, int vl, int mode, u64 data)
2518{
2519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2520
2521 return dd->rcv_err_status_cnt[20];
2522}
2523
2524static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2525 const struct cntr_entry *entry,
2526 void *context, int vl, int mode, u64 data)
2527{
2528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2529
2530 return dd->rcv_err_status_cnt[19];
2531}
2532
2533static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2534 void *context, int vl,
2535 int mode, u64 data)
2536{
2537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2538
2539 return dd->rcv_err_status_cnt[18];
2540}
2541
2542static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2543 void *context, int vl,
2544 int mode, u64 data)
2545{
2546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2547
2548 return dd->rcv_err_status_cnt[17];
2549}
2550
2551static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2552 const struct cntr_entry *entry,
2553 void *context, int vl, int mode, u64 data)
2554{
2555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2556
2557 return dd->rcv_err_status_cnt[16];
2558}
2559
2560static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2561 const struct cntr_entry *entry,
2562 void *context, int vl, int mode, u64 data)
2563{
2564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2565
2566 return dd->rcv_err_status_cnt[15];
2567}
2568
2569static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2570 void *context, int vl,
2571 int mode, u64 data)
2572{
2573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2574
2575 return dd->rcv_err_status_cnt[14];
2576}
2577
2578static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2579 void *context, int vl,
2580 int mode, u64 data)
2581{
2582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2583
2584 return dd->rcv_err_status_cnt[13];
2585}
2586
2587static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2588 void *context, int vl, int mode,
2589 u64 data)
2590{
2591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2592
2593 return dd->rcv_err_status_cnt[12];
2594}
2595
2596static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2597 void *context, int vl, int mode,
2598 u64 data)
2599{
2600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2601
2602 return dd->rcv_err_status_cnt[11];
2603}
2604
2605static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2606 void *context, int vl, int mode,
2607 u64 data)
2608{
2609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2610
2611 return dd->rcv_err_status_cnt[10];
2612}
2613
2614static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2615 void *context, int vl, int mode,
2616 u64 data)
2617{
2618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2619
2620 return dd->rcv_err_status_cnt[9];
2621}
2622
2623static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2624 void *context, int vl, int mode,
2625 u64 data)
2626{
2627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2628
2629 return dd->rcv_err_status_cnt[8];
2630}
2631
2632static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2633 const struct cntr_entry *entry,
2634 void *context, int vl, int mode, u64 data)
2635{
2636 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2637
2638 return dd->rcv_err_status_cnt[7];
2639}
2640
2641static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2642 const struct cntr_entry *entry,
2643 void *context, int vl, int mode, u64 data)
2644{
2645 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2646
2647 return dd->rcv_err_status_cnt[6];
2648}
2649
2650static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2651 void *context, int vl, int mode,
2652 u64 data)
2653{
2654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2655
2656 return dd->rcv_err_status_cnt[5];
2657}
2658
2659static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2660 void *context, int vl, int mode,
2661 u64 data)
2662{
2663 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2664
2665 return dd->rcv_err_status_cnt[4];
2666}
2667
2668static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2669 void *context, int vl, int mode,
2670 u64 data)
2671{
2672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2673
2674 return dd->rcv_err_status_cnt[3];
2675}
2676
2677static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2678 void *context, int vl, int mode,
2679 u64 data)
2680{
2681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2682
2683 return dd->rcv_err_status_cnt[2];
2684}
2685
2686static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2687 void *context, int vl, int mode,
2688 u64 data)
2689{
2690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2691
2692 return dd->rcv_err_status_cnt[1];
2693}
2694
2695static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2696 void *context, int vl, int mode,
2697 u64 data)
2698{
2699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2700
2701 return dd->rcv_err_status_cnt[0];
2702}
2703
2704/*
2705 * Software counters corresponding to each of the
2706 * error status bits within SendPioErrStatus
2707 */
2708static u64 access_pio_pec_sop_head_parity_err_cnt(
2709 const struct cntr_entry *entry,
2710 void *context, int vl, int mode, u64 data)
2711{
2712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2713
2714 return dd->send_pio_err_status_cnt[35];
2715}
2716
2717static u64 access_pio_pcc_sop_head_parity_err_cnt(
2718 const struct cntr_entry *entry,
2719 void *context, int vl, int mode, u64 data)
2720{
2721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2722
2723 return dd->send_pio_err_status_cnt[34];
2724}
2725
2726static u64 access_pio_last_returned_cnt_parity_err_cnt(
2727 const struct cntr_entry *entry,
2728 void *context, int vl, int mode, u64 data)
2729{
2730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2731
2732 return dd->send_pio_err_status_cnt[33];
2733}
2734
2735static u64 access_pio_current_free_cnt_parity_err_cnt(
2736 const struct cntr_entry *entry,
2737 void *context, int vl, int mode, u64 data)
2738{
2739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2740
2741 return dd->send_pio_err_status_cnt[32];
2742}
2743
2744static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2745 void *context, int vl, int mode,
2746 u64 data)
2747{
2748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2749
2750 return dd->send_pio_err_status_cnt[31];
2751}
2752
2753static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2754 void *context, int vl, int mode,
2755 u64 data)
2756{
2757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2758
2759 return dd->send_pio_err_status_cnt[30];
2760}
2761
2762static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2763 void *context, int vl, int mode,
2764 u64 data)
2765{
2766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2767
2768 return dd->send_pio_err_status_cnt[29];
2769}
2770
2771static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2772 const struct cntr_entry *entry,
2773 void *context, int vl, int mode, u64 data)
2774{
2775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2776
2777 return dd->send_pio_err_status_cnt[28];
2778}
2779
2780static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2781 void *context, int vl, int mode,
2782 u64 data)
2783{
2784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2785
2786 return dd->send_pio_err_status_cnt[27];
2787}
2788
2789static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2790 void *context, int vl, int mode,
2791 u64 data)
2792{
2793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2794
2795 return dd->send_pio_err_status_cnt[26];
2796}
2797
2798static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2799 void *context, int vl,
2800 int mode, u64 data)
2801{
2802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2803
2804 return dd->send_pio_err_status_cnt[25];
2805}
2806
2807static u64 access_pio_block_qw_count_parity_err_cnt(
2808 const struct cntr_entry *entry,
2809 void *context, int vl, int mode, u64 data)
2810{
2811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2812
2813 return dd->send_pio_err_status_cnt[24];
2814}
2815
2816static u64 access_pio_write_qw_valid_parity_err_cnt(
2817 const struct cntr_entry *entry,
2818 void *context, int vl, int mode, u64 data)
2819{
2820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2821
2822 return dd->send_pio_err_status_cnt[23];
2823}
2824
2825static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2826 void *context, int vl, int mode,
2827 u64 data)
2828{
2829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2830
2831 return dd->send_pio_err_status_cnt[22];
2832}
2833
2834static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2835 void *context, int vl,
2836 int mode, u64 data)
2837{
2838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2839
2840 return dd->send_pio_err_status_cnt[21];
2841}
2842
2843static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2844 void *context, int vl,
2845 int mode, u64 data)
2846{
2847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2848
2849 return dd->send_pio_err_status_cnt[20];
2850}
2851
2852static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2853 void *context, int vl,
2854 int mode, u64 data)
2855{
2856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2857
2858 return dd->send_pio_err_status_cnt[19];
2859}
2860
2861static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2862 const struct cntr_entry *entry,
2863 void *context, int vl, int mode, u64 data)
2864{
2865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2866
2867 return dd->send_pio_err_status_cnt[18];
2868}
2869
2870static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2871 void *context, int vl, int mode,
2872 u64 data)
2873{
2874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2875
2876 return dd->send_pio_err_status_cnt[17];
2877}
2878
2879static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2880 void *context, int vl, int mode,
2881 u64 data)
2882{
2883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2884
2885 return dd->send_pio_err_status_cnt[16];
2886}
2887
2888static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2889 const struct cntr_entry *entry,
2890 void *context, int vl, int mode, u64 data)
2891{
2892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2893
2894 return dd->send_pio_err_status_cnt[15];
2895}
2896
2897static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2898 const struct cntr_entry *entry,
2899 void *context, int vl, int mode, u64 data)
2900{
2901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2902
2903 return dd->send_pio_err_status_cnt[14];
2904}
2905
2906static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2907 const struct cntr_entry *entry,
2908 void *context, int vl, int mode, u64 data)
2909{
2910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2911
2912 return dd->send_pio_err_status_cnt[13];
2913}
2914
2915static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2916 const struct cntr_entry *entry,
2917 void *context, int vl, int mode, u64 data)
2918{
2919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2920
2921 return dd->send_pio_err_status_cnt[12];
2922}
2923
2924static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2925 const struct cntr_entry *entry,
2926 void *context, int vl, int mode, u64 data)
2927{
2928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2929
2930 return dd->send_pio_err_status_cnt[11];
2931}
2932
2933static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2934 const struct cntr_entry *entry,
2935 void *context, int vl, int mode, u64 data)
2936{
2937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2938
2939 return dd->send_pio_err_status_cnt[10];
2940}
2941
2942static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2943 const struct cntr_entry *entry,
2944 void *context, int vl, int mode, u64 data)
2945{
2946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2947
2948 return dd->send_pio_err_status_cnt[9];
2949}
2950
2951static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2952 const struct cntr_entry *entry,
2953 void *context, int vl, int mode, u64 data)
2954{
2955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2956
2957 return dd->send_pio_err_status_cnt[8];
2958}
2959
2960static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2961 const struct cntr_entry *entry,
2962 void *context, int vl, int mode, u64 data)
2963{
2964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2965
2966 return dd->send_pio_err_status_cnt[7];
2967}
2968
2969static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2970 void *context, int vl, int mode,
2971 u64 data)
2972{
2973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2974
2975 return dd->send_pio_err_status_cnt[6];
2976}
2977
2978static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2979 void *context, int vl, int mode,
2980 u64 data)
2981{
2982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2983
2984 return dd->send_pio_err_status_cnt[5];
2985}
2986
2987static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2988 void *context, int vl, int mode,
2989 u64 data)
2990{
2991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2992
2993 return dd->send_pio_err_status_cnt[4];
2994}
2995
2996static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2997 void *context, int vl, int mode,
2998 u64 data)
2999{
3000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3001
3002 return dd->send_pio_err_status_cnt[3];
3003}
3004
3005static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3006 void *context, int vl, int mode,
3007 u64 data)
3008{
3009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3010
3011 return dd->send_pio_err_status_cnt[2];
3012}
3013
3014static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3015 void *context, int vl,
3016 int mode, u64 data)
3017{
3018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3019
3020 return dd->send_pio_err_status_cnt[1];
3021}
3022
3023static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3024 void *context, int vl, int mode,
3025 u64 data)
3026{
3027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3028
3029 return dd->send_pio_err_status_cnt[0];
3030}
3031
3032/*
3033 * Software counters corresponding to each of the
3034 * error status bits within SendDmaErrStatus
3035 */
3036static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3037 const struct cntr_entry *entry,
3038 void *context, int vl, int mode, u64 data)
3039{
3040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3041
3042 return dd->send_dma_err_status_cnt[3];
3043}
3044
3045static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3046 const struct cntr_entry *entry,
3047 void *context, int vl, int mode, u64 data)
3048{
3049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3050
3051 return dd->send_dma_err_status_cnt[2];
3052}
3053
3054static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3055 void *context, int vl, int mode,
3056 u64 data)
3057{
3058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3059
3060 return dd->send_dma_err_status_cnt[1];
3061}
3062
3063static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3064 void *context, int vl, int mode,
3065 u64 data)
3066{
3067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3068
3069 return dd->send_dma_err_status_cnt[0];
3070}
3071
3072/*
3073 * Software counters corresponding to each of the
3074 * error status bits within SendEgressErrStatus
3075 */
3076static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3077 const struct cntr_entry *entry,
3078 void *context, int vl, int mode, u64 data)
3079{
3080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3081
3082 return dd->send_egress_err_status_cnt[63];
3083}
3084
3085static u64 access_tx_read_sdma_memory_csr_err_cnt(
3086 const struct cntr_entry *entry,
3087 void *context, int vl, int mode, u64 data)
3088{
3089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3090
3091 return dd->send_egress_err_status_cnt[62];
3092}
3093
3094static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3095 void *context, int vl, int mode,
3096 u64 data)
3097{
3098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3099
3100 return dd->send_egress_err_status_cnt[61];
3101}
3102
3103static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3104 void *context, int vl,
3105 int mode, u64 data)
3106{
3107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3108
3109 return dd->send_egress_err_status_cnt[60];
3110}
3111
3112static u64 access_tx_read_sdma_memory_cor_err_cnt(
3113 const struct cntr_entry *entry,
3114 void *context, int vl, int mode, u64 data)
3115{
3116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3117
3118 return dd->send_egress_err_status_cnt[59];
3119}
3120
3121static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3122 void *context, int vl, int mode,
3123 u64 data)
3124{
3125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3126
3127 return dd->send_egress_err_status_cnt[58];
3128}
3129
3130static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3131 void *context, int vl, int mode,
3132 u64 data)
3133{
3134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3135
3136 return dd->send_egress_err_status_cnt[57];
3137}
3138
3139static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3140 void *context, int vl, int mode,
3141 u64 data)
3142{
3143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3144
3145 return dd->send_egress_err_status_cnt[56];
3146}
3147
3148static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3149 void *context, int vl, int mode,
3150 u64 data)
3151{
3152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3153
3154 return dd->send_egress_err_status_cnt[55];
3155}
3156
3157static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3158 void *context, int vl, int mode,
3159 u64 data)
3160{
3161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3162
3163 return dd->send_egress_err_status_cnt[54];
3164}
3165
3166static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3167 void *context, int vl, int mode,
3168 u64 data)
3169{
3170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3171
3172 return dd->send_egress_err_status_cnt[53];
3173}
3174
3175static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3176 void *context, int vl, int mode,
3177 u64 data)
3178{
3179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3180
3181 return dd->send_egress_err_status_cnt[52];
3182}
3183
3184static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3185 void *context, int vl, int mode,
3186 u64 data)
3187{
3188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3189
3190 return dd->send_egress_err_status_cnt[51];
3191}
3192
3193static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3194 void *context, int vl, int mode,
3195 u64 data)
3196{
3197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3198
3199 return dd->send_egress_err_status_cnt[50];
3200}
3201
3202static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3203 void *context, int vl, int mode,
3204 u64 data)
3205{
3206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3207
3208 return dd->send_egress_err_status_cnt[49];
3209}
3210
3211static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3212 void *context, int vl, int mode,
3213 u64 data)
3214{
3215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3216
3217 return dd->send_egress_err_status_cnt[48];
3218}
3219
3220static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3221 void *context, int vl, int mode,
3222 u64 data)
3223{
3224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3225
3226 return dd->send_egress_err_status_cnt[47];
3227}
3228
3229static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3230 void *context, int vl, int mode,
3231 u64 data)
3232{
3233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3234
3235 return dd->send_egress_err_status_cnt[46];
3236}
3237
3238static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3239 void *context, int vl, int mode,
3240 u64 data)
3241{
3242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3243
3244 return dd->send_egress_err_status_cnt[45];
3245}
3246
3247static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3248 void *context, int vl,
3249 int mode, u64 data)
3250{
3251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3252
3253 return dd->send_egress_err_status_cnt[44];
3254}
3255
3256static u64 access_tx_read_sdma_memory_unc_err_cnt(
3257 const struct cntr_entry *entry,
3258 void *context, int vl, int mode, u64 data)
3259{
3260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3261
3262 return dd->send_egress_err_status_cnt[43];
3263}
3264
3265static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3266 void *context, int vl, int mode,
3267 u64 data)
3268{
3269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3270
3271 return dd->send_egress_err_status_cnt[42];
3272}
3273
3274static u64 access_tx_credit_return_partiy_err_cnt(
3275 const struct cntr_entry *entry,
3276 void *context, int vl, int mode, u64 data)
3277{
3278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3279
3280 return dd->send_egress_err_status_cnt[41];
3281}
3282
3283static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3284 const struct cntr_entry *entry,
3285 void *context, int vl, int mode, u64 data)
3286{
3287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3288
3289 return dd->send_egress_err_status_cnt[40];
3290}
3291
3292static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3293 const struct cntr_entry *entry,
3294 void *context, int vl, int mode, u64 data)
3295{
3296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3297
3298 return dd->send_egress_err_status_cnt[39];
3299}
3300
3301static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3302 const struct cntr_entry *entry,
3303 void *context, int vl, int mode, u64 data)
3304{
3305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3306
3307 return dd->send_egress_err_status_cnt[38];
3308}
3309
3310static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3311 const struct cntr_entry *entry,
3312 void *context, int vl, int mode, u64 data)
3313{
3314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3315
3316 return dd->send_egress_err_status_cnt[37];
3317}
3318
3319static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3320 const struct cntr_entry *entry,
3321 void *context, int vl, int mode, u64 data)
3322{
3323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3324
3325 return dd->send_egress_err_status_cnt[36];
3326}
3327
3328static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3329 const struct cntr_entry *entry,
3330 void *context, int vl, int mode, u64 data)
3331{
3332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3333
3334 return dd->send_egress_err_status_cnt[35];
3335}
3336
3337static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3338 const struct cntr_entry *entry,
3339 void *context, int vl, int mode, u64 data)
3340{
3341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3342
3343 return dd->send_egress_err_status_cnt[34];
3344}
3345
3346static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3347 const struct cntr_entry *entry,
3348 void *context, int vl, int mode, u64 data)
3349{
3350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3351
3352 return dd->send_egress_err_status_cnt[33];
3353}
3354
3355static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3356 const struct cntr_entry *entry,
3357 void *context, int vl, int mode, u64 data)
3358{
3359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3360
3361 return dd->send_egress_err_status_cnt[32];
3362}
3363
3364static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3365 const struct cntr_entry *entry,
3366 void *context, int vl, int mode, u64 data)
3367{
3368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3369
3370 return dd->send_egress_err_status_cnt[31];
3371}
3372
3373static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3374 const struct cntr_entry *entry,
3375 void *context, int vl, int mode, u64 data)
3376{
3377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3378
3379 return dd->send_egress_err_status_cnt[30];
3380}
3381
3382static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3383 const struct cntr_entry *entry,
3384 void *context, int vl, int mode, u64 data)
3385{
3386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3387
3388 return dd->send_egress_err_status_cnt[29];
3389}
3390
3391static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3392 const struct cntr_entry *entry,
3393 void *context, int vl, int mode, u64 data)
3394{
3395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3396
3397 return dd->send_egress_err_status_cnt[28];
3398}
3399
3400static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3401 const struct cntr_entry *entry,
3402 void *context, int vl, int mode, u64 data)
3403{
3404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3405
3406 return dd->send_egress_err_status_cnt[27];
3407}
3408
3409static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3410 const struct cntr_entry *entry,
3411 void *context, int vl, int mode, u64 data)
3412{
3413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3414
3415 return dd->send_egress_err_status_cnt[26];
3416}
3417
3418static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3419 const struct cntr_entry *entry,
3420 void *context, int vl, int mode, u64 data)
3421{
3422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3423
3424 return dd->send_egress_err_status_cnt[25];
3425}
3426
3427static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3428 const struct cntr_entry *entry,
3429 void *context, int vl, int mode, u64 data)
3430{
3431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3432
3433 return dd->send_egress_err_status_cnt[24];
3434}
3435
3436static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3437 const struct cntr_entry *entry,
3438 void *context, int vl, int mode, u64 data)
3439{
3440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3441
3442 return dd->send_egress_err_status_cnt[23];
3443}
3444
3445static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3446 const struct cntr_entry *entry,
3447 void *context, int vl, int mode, u64 data)
3448{
3449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3450
3451 return dd->send_egress_err_status_cnt[22];
3452}
3453
3454static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3455 const struct cntr_entry *entry,
3456 void *context, int vl, int mode, u64 data)
3457{
3458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3459
3460 return dd->send_egress_err_status_cnt[21];
3461}
3462
3463static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3464 const struct cntr_entry *entry,
3465 void *context, int vl, int mode, u64 data)
3466{
3467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3468
3469 return dd->send_egress_err_status_cnt[20];
3470}
3471
3472static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3473 const struct cntr_entry *entry,
3474 void *context, int vl, int mode, u64 data)
3475{
3476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3477
3478 return dd->send_egress_err_status_cnt[19];
3479}
3480
3481static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3482 const struct cntr_entry *entry,
3483 void *context, int vl, int mode, u64 data)
3484{
3485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3486
3487 return dd->send_egress_err_status_cnt[18];
3488}
3489
3490static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3491 const struct cntr_entry *entry,
3492 void *context, int vl, int mode, u64 data)
3493{
3494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3495
3496 return dd->send_egress_err_status_cnt[17];
3497}
3498
3499static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3500 const struct cntr_entry *entry,
3501 void *context, int vl, int mode, u64 data)
3502{
3503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3504
3505 return dd->send_egress_err_status_cnt[16];
3506}
3507
3508static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3509 void *context, int vl, int mode,
3510 u64 data)
3511{
3512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3513
3514 return dd->send_egress_err_status_cnt[15];
3515}
3516
3517static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3518 void *context, int vl,
3519 int mode, u64 data)
3520{
3521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3522
3523 return dd->send_egress_err_status_cnt[14];
3524}
3525
3526static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3527 void *context, int vl, int mode,
3528 u64 data)
3529{
3530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3531
3532 return dd->send_egress_err_status_cnt[13];
3533}
3534
3535static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3536 void *context, int vl, int mode,
3537 u64 data)
3538{
3539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3540
3541 return dd->send_egress_err_status_cnt[12];
3542}
3543
3544static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3545 const struct cntr_entry *entry,
3546 void *context, int vl, int mode, u64 data)
3547{
3548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3549
3550 return dd->send_egress_err_status_cnt[11];
3551}
3552
3553static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3554 void *context, int vl, int mode,
3555 u64 data)
3556{
3557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3558
3559 return dd->send_egress_err_status_cnt[10];
3560}
3561
3562static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3563 void *context, int vl, int mode,
3564 u64 data)
3565{
3566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3567
3568 return dd->send_egress_err_status_cnt[9];
3569}
3570
3571static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3572 const struct cntr_entry *entry,
3573 void *context, int vl, int mode, u64 data)
3574{
3575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3576
3577 return dd->send_egress_err_status_cnt[8];
3578}
3579
3580static u64 access_tx_pio_launch_intf_parity_err_cnt(
3581 const struct cntr_entry *entry,
3582 void *context, int vl, int mode, u64 data)
3583{
3584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3585
3586 return dd->send_egress_err_status_cnt[7];
3587}
3588
3589static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3590 void *context, int vl, int mode,
3591 u64 data)
3592{
3593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3594
3595 return dd->send_egress_err_status_cnt[6];
3596}
3597
3598static u64 access_tx_incorrect_link_state_err_cnt(
3599 const struct cntr_entry *entry,
3600 void *context, int vl, int mode, u64 data)
3601{
3602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3603
3604 return dd->send_egress_err_status_cnt[5];
3605}
3606
3607static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3608 void *context, int vl, int mode,
3609 u64 data)
3610{
3611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3612
3613 return dd->send_egress_err_status_cnt[4];
3614}
3615
3616static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3617 const struct cntr_entry *entry,
3618 void *context, int vl, int mode, u64 data)
3619{
3620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3621
3622 return dd->send_egress_err_status_cnt[3];
3623}
3624
3625static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3626 void *context, int vl, int mode,
3627 u64 data)
3628{
3629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3630
3631 return dd->send_egress_err_status_cnt[2];
3632}
3633
3634static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3635 const struct cntr_entry *entry,
3636 void *context, int vl, int mode, u64 data)
3637{
3638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3639
3640 return dd->send_egress_err_status_cnt[1];
3641}
3642
3643static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3644 const struct cntr_entry *entry,
3645 void *context, int vl, int mode, u64 data)
3646{
3647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3648
3649 return dd->send_egress_err_status_cnt[0];
3650}
3651
3652/*
3653 * Software counters corresponding to each of the
3654 * error status bits within SendErrStatus
3655 */
3656static u64 access_send_csr_write_bad_addr_err_cnt(
3657 const struct cntr_entry *entry,
3658 void *context, int vl, int mode, u64 data)
3659{
3660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3661
3662 return dd->send_err_status_cnt[2];
3663}
3664
3665static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3666 void *context, int vl,
3667 int mode, u64 data)
3668{
3669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3670
3671 return dd->send_err_status_cnt[1];
3672}
3673
3674static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3675 void *context, int vl, int mode,
3676 u64 data)
3677{
3678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3679
3680 return dd->send_err_status_cnt[0];
3681}
3682
3683/*
3684 * Software counters corresponding to each of the
3685 * error status bits within SendCtxtErrStatus
3686 */
3687static u64 access_pio_write_out_of_bounds_err_cnt(
3688 const struct cntr_entry *entry,
3689 void *context, int vl, int mode, u64 data)
3690{
3691 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3692
3693 return dd->sw_ctxt_err_status_cnt[4];
3694}
3695
3696static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3697 void *context, int vl, int mode,
3698 u64 data)
3699{
3700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3701
3702 return dd->sw_ctxt_err_status_cnt[3];
3703}
3704
3705static u64 access_pio_write_crosses_boundary_err_cnt(
3706 const struct cntr_entry *entry,
3707 void *context, int vl, int mode, u64 data)
3708{
3709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3710
3711 return dd->sw_ctxt_err_status_cnt[2];
3712}
3713
3714static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3715 void *context, int vl,
3716 int mode, u64 data)
3717{
3718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3719
3720 return dd->sw_ctxt_err_status_cnt[1];
3721}
3722
3723static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3724 void *context, int vl, int mode,
3725 u64 data)
3726{
3727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3728
3729 return dd->sw_ctxt_err_status_cnt[0];
3730}
3731
3732/*
3733 * Software counters corresponding to each of the
3734 * error status bits within SendDmaEngErrStatus
3735 */
3736static u64 access_sdma_header_request_fifo_cor_err_cnt(
3737 const struct cntr_entry *entry,
3738 void *context, int vl, int mode, u64 data)
3739{
3740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3741
3742 return dd->sw_send_dma_eng_err_status_cnt[23];
3743}
3744
3745static u64 access_sdma_header_storage_cor_err_cnt(
3746 const struct cntr_entry *entry,
3747 void *context, int vl, int mode, u64 data)
3748{
3749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3750
3751 return dd->sw_send_dma_eng_err_status_cnt[22];
3752}
3753
3754static u64 access_sdma_packet_tracking_cor_err_cnt(
3755 const struct cntr_entry *entry,
3756 void *context, int vl, int mode, u64 data)
3757{
3758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3759
3760 return dd->sw_send_dma_eng_err_status_cnt[21];
3761}
3762
3763static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3764 void *context, int vl, int mode,
3765 u64 data)
3766{
3767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3768
3769 return dd->sw_send_dma_eng_err_status_cnt[20];
3770}
3771
3772static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3773 void *context, int vl, int mode,
3774 u64 data)
3775{
3776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3777
3778 return dd->sw_send_dma_eng_err_status_cnt[19];
3779}
3780
3781static u64 access_sdma_header_request_fifo_unc_err_cnt(
3782 const struct cntr_entry *entry,
3783 void *context, int vl, int mode, u64 data)
3784{
3785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3786
3787 return dd->sw_send_dma_eng_err_status_cnt[18];
3788}
3789
3790static u64 access_sdma_header_storage_unc_err_cnt(
3791 const struct cntr_entry *entry,
3792 void *context, int vl, int mode, u64 data)
3793{
3794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3795
3796 return dd->sw_send_dma_eng_err_status_cnt[17];
3797}
3798
3799static u64 access_sdma_packet_tracking_unc_err_cnt(
3800 const struct cntr_entry *entry,
3801 void *context, int vl, int mode, u64 data)
3802{
3803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3804
3805 return dd->sw_send_dma_eng_err_status_cnt[16];
3806}
3807
3808static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3809 void *context, int vl, int mode,
3810 u64 data)
3811{
3812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3813
3814 return dd->sw_send_dma_eng_err_status_cnt[15];
3815}
3816
3817static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3818 void *context, int vl, int mode,
3819 u64 data)
3820{
3821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3822
3823 return dd->sw_send_dma_eng_err_status_cnt[14];
3824}
3825
3826static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3827 void *context, int vl, int mode,
3828 u64 data)
3829{
3830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3831
3832 return dd->sw_send_dma_eng_err_status_cnt[13];
3833}
3834
3835static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3836 void *context, int vl, int mode,
3837 u64 data)
3838{
3839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3840
3841 return dd->sw_send_dma_eng_err_status_cnt[12];
3842}
3843
3844static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3845 void *context, int vl, int mode,
3846 u64 data)
3847{
3848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3849
3850 return dd->sw_send_dma_eng_err_status_cnt[11];
3851}
3852
3853static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3854 void *context, int vl, int mode,
3855 u64 data)
3856{
3857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3858
3859 return dd->sw_send_dma_eng_err_status_cnt[10];
3860}
3861
3862static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3863 void *context, int vl, int mode,
3864 u64 data)
3865{
3866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3867
3868 return dd->sw_send_dma_eng_err_status_cnt[9];
3869}
3870
3871static u64 access_sdma_packet_desc_overflow_err_cnt(
3872 const struct cntr_entry *entry,
3873 void *context, int vl, int mode, u64 data)
3874{
3875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3876
3877 return dd->sw_send_dma_eng_err_status_cnt[8];
3878}
3879
3880static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3881 void *context, int vl,
3882 int mode, u64 data)
3883{
3884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3885
3886 return dd->sw_send_dma_eng_err_status_cnt[7];
3887}
3888
3889static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3890 void *context, int vl, int mode, u64 data)
3891{
3892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3893
3894 return dd->sw_send_dma_eng_err_status_cnt[6];
3895}
3896
3897static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3898 void *context, int vl, int mode,
3899 u64 data)
3900{
3901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3902
3903 return dd->sw_send_dma_eng_err_status_cnt[5];
3904}
3905
3906static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3907 void *context, int vl, int mode,
3908 u64 data)
3909{
3910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3911
3912 return dd->sw_send_dma_eng_err_status_cnt[4];
3913}
3914
3915static u64 access_sdma_tail_out_of_bounds_err_cnt(
3916 const struct cntr_entry *entry,
3917 void *context, int vl, int mode, u64 data)
3918{
3919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3920
3921 return dd->sw_send_dma_eng_err_status_cnt[3];
3922}
3923
3924static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3925 void *context, int vl, int mode,
3926 u64 data)
3927{
3928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3929
3930 return dd->sw_send_dma_eng_err_status_cnt[2];
3931}
3932
3933static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3934 void *context, int vl, int mode,
3935 u64 data)
3936{
3937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3938
3939 return dd->sw_send_dma_eng_err_status_cnt[1];
3940}
3941
3942static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3943 void *context, int vl, int mode,
3944 u64 data)
3945{
3946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3947
3948 return dd->sw_send_dma_eng_err_status_cnt[0];
3949}
3950
3951#define def_access_sw_cpu(cntr) \
3952static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3953 void *context, int vl, int mode, u64 data) \
3954{ \
3955 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3956 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3957 ppd->ibport_data.rvp.cntr, vl, \
3958			      mode, data); \
3959}
3960
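/*
 * Illustration only (not compiled): the instantiation below for rc_acks is
 * a mechanical expansion of the macro above, generating the accessor for
 * the per-port, per-CPU rc_acks counter:
 *
 * static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				    void *context, int vl, int mode, u64 data)
 * {
 *	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *			      ppd->ibport_data.rvp.rc_acks, vl,
 *			      mode, data);
 * }
 */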
3961def_access_sw_cpu(rc_acks);
3962def_access_sw_cpu(rc_qacks);
3963def_access_sw_cpu(rc_delayed_comp);
3964
3965#define def_access_ibp_counter(cntr) \
3966static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3967 void *context, int vl, int mode, u64 data) \
3968{ \
3969 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3970 \
3971 if (vl != CNTR_INVALID_VL) \
3972 return 0; \
3973 \
3974 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3975			     mode, data); \
3976}
3977
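/*
 * Illustration only (not compiled): expanding the macro for loop_pkts gives
 * the accessor below.  These counters are port-wide, so any per-VL request
 * (vl != CNTR_INVALID_VL) reads back as 0:
 *
 * static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *				   void *context, int vl, int mode, u64 data)
 * {
 *	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *	if (vl != CNTR_INVALID_VL)
 *		return 0;
 *
 *	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *			     mode, data);
 * }
 */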
3978def_access_ibp_counter(loop_pkts);
3979def_access_ibp_counter(rc_resends);
3980def_access_ibp_counter(rnr_naks);
3981def_access_ibp_counter(other_naks);
3982def_access_ibp_counter(rc_timeouts);
3983def_access_ibp_counter(pkt_drops);
3984def_access_ibp_counter(dmawait);
3985def_access_ibp_counter(rc_seqnak);
3986def_access_ibp_counter(rc_dupreq);
3987def_access_ibp_counter(rdma_seq);
3988def_access_ibp_counter(unaligned);
3989def_access_ibp_counter(seq_naks);
3990
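/*
 * Device counter table, indexed by the C_* enum values via designated
 * initializers.  Hardware-backed entries are built with helpers such as
 * RXE32_DEV_CNTR_ELEM() and DC_PERF_CNTR() around a CSR and flags
 * (CNTR_SYNTH, CNTR_VL, ...); software counters typically use CNTR_ELEM()
 * with the CSR fields zeroed and one of the access callbacks defined above.
 */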
3991static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3992[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3993[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3994 CNTR_NORMAL),
3995[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3996 CNTR_NORMAL),
3997[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3998 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3999 CNTR_NORMAL),
4000[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4001 CNTR_NORMAL),
4002[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4003 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4004[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4005 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4006[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4007 CNTR_NORMAL),
4008[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4009 CNTR_NORMAL),
4010[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4011 CNTR_NORMAL),
4012[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4013 CNTR_NORMAL),
4014[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4015 CNTR_NORMAL),
4016[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4017 CNTR_NORMAL),
4018[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4019 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4020[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4021 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4022[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4023 CNTR_SYNTH),
4024[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4025[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4026 CNTR_SYNTH),
4027[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4028 CNTR_SYNTH),
4029[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4030 CNTR_SYNTH),
4031[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4032 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4033[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4034 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4035 CNTR_SYNTH),
4036[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4037 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4038[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4039 CNTR_SYNTH),
4040[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4041 CNTR_SYNTH),
4042[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4043 CNTR_SYNTH),
4044[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4045 CNTR_SYNTH),
4046[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4047 CNTR_SYNTH),
4048[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4049 CNTR_SYNTH),
4050[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4051 CNTR_SYNTH),
4052[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4053 CNTR_SYNTH | CNTR_VL),
4054[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4055 CNTR_SYNTH | CNTR_VL),
4056[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4057[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4058 CNTR_SYNTH | CNTR_VL),
4059[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4060[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4061 CNTR_SYNTH | CNTR_VL),
4062[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4063 CNTR_SYNTH),
4064[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4065 CNTR_SYNTH | CNTR_VL),
4066[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4067 CNTR_SYNTH),
4068[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4069 CNTR_SYNTH | CNTR_VL),
4070[C_DC_TOTAL_CRC] =
4071 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4072 CNTR_SYNTH),
4073[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4074 CNTR_SYNTH),
4075[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4076 CNTR_SYNTH),
4077[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4078 CNTR_SYNTH),
4079[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4080 CNTR_SYNTH),
4081[C_DC_CRC_MULT_LN] =
4082 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4083 CNTR_SYNTH),
4084[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4085 CNTR_SYNTH),
4086[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4087 CNTR_SYNTH),
4088[C_DC_SEQ_CRC_CNT] =
4089 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4090 CNTR_SYNTH),
4091[C_DC_ESC0_ONLY_CNT] =
4092 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4093 CNTR_SYNTH),
4094[C_DC_ESC0_PLUS1_CNT] =
4095 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4096 CNTR_SYNTH),
4097[C_DC_ESC0_PLUS2_CNT] =
4098 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4099 CNTR_SYNTH),
4100[C_DC_REINIT_FROM_PEER_CNT] =
4101 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4102 CNTR_SYNTH),
4103[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4104 CNTR_SYNTH),
4105[C_DC_MISC_FLG_CNT] =
4106 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4107 CNTR_SYNTH),
4108[C_DC_PRF_GOOD_LTP_CNT] =
4109 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4110[C_DC_PRF_ACCEPTED_LTP_CNT] =
4111 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4112 CNTR_SYNTH),
4113[C_DC_PRF_RX_FLIT_CNT] =
4114 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4115[C_DC_PRF_TX_FLIT_CNT] =
4116 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4117[C_DC_PRF_CLK_CNTR] =
4118 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4119[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4120 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4121[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4122 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4123 CNTR_SYNTH),
4124[C_DC_PG_STS_TX_SBE_CNT] =
4125 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4126[C_DC_PG_STS_TX_MBE_CNT] =
4127 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4128 CNTR_SYNTH),
4129[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4130 access_sw_cpu_intr),
4131[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4132 access_sw_cpu_rcv_limit),
4133[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4134 access_sw_vtx_wait),
4135[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4136 access_sw_pio_wait),
4137[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4138 access_sw_pio_drain),
4139[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4140 access_sw_kmem_wait),
4141[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4142 access_sw_send_schedule),
4143[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4144 SEND_DMA_DESC_FETCHED_CNT, 0,
4145 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4146 dev_access_u32_csr),
4147[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4148 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4149 access_sde_int_cnt),
4150[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4151 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4152 access_sde_err_cnt),
4153[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4154 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4155 access_sde_idle_int_cnt),
4156[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4157 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4158 access_sde_progress_int_cnt),
4159/* MISC_ERR_STATUS */
4160[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4161 CNTR_NORMAL,
4162 access_misc_pll_lock_fail_err_cnt),
4163[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_mbist_fail_err_cnt),
4166[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4167 CNTR_NORMAL,
4168 access_misc_invalid_eep_cmd_err_cnt),
4169[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_efuse_done_parity_err_cnt),
4172[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_efuse_write_err_cnt),
4175[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4176 0, CNTR_NORMAL,
4177 access_misc_efuse_read_bad_addr_err_cnt),
4178[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4179 CNTR_NORMAL,
4180 access_misc_efuse_csr_parity_err_cnt),
4181[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4182 CNTR_NORMAL,
4183 access_misc_fw_auth_failed_err_cnt),
4184[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4185 CNTR_NORMAL,
4186 access_misc_key_mismatch_err_cnt),
4187[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4188 CNTR_NORMAL,
4189 access_misc_sbus_write_failed_err_cnt),
4190[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4191 CNTR_NORMAL,
4192 access_misc_csr_write_bad_addr_err_cnt),
4193[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4194 CNTR_NORMAL,
4195 access_misc_csr_read_bad_addr_err_cnt),
4196[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4197 CNTR_NORMAL,
4198 access_misc_csr_parity_err_cnt),
4199/* CceErrStatus */
4200[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4201 CNTR_NORMAL,
4202 access_sw_cce_err_status_aggregated_cnt),
4203[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4204 CNTR_NORMAL,
4205 access_cce_msix_csr_parity_err_cnt),
4206[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_int_map_unc_err_cnt),
4209[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4210 CNTR_NORMAL,
4211 access_cce_int_map_cor_err_cnt),
4212[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4213 CNTR_NORMAL,
4214 access_cce_msix_table_unc_err_cnt),
4215[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_cce_msix_table_cor_err_cnt),
4218[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4219 0, CNTR_NORMAL,
4220 access_cce_rxdma_conv_fifo_parity_err_cnt),
4221[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4222 0, CNTR_NORMAL,
4223 access_cce_rcpl_async_fifo_parity_err_cnt),
4224[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4225 CNTR_NORMAL,
4226 access_cce_seg_write_bad_addr_err_cnt),
4227[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_cce_seg_read_bad_addr_err_cnt),
4230[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4231 CNTR_NORMAL,
4232 access_la_triggered_cnt),
4233[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4234 CNTR_NORMAL,
4235 access_cce_trgt_cpl_timeout_err_cnt),
4236[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_pcic_receive_parity_err_cnt),
4239[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_transmit_back_parity_err_cnt),
4242[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4243 0, CNTR_NORMAL,
4244 access_pcic_transmit_front_parity_err_cnt),
4245[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_pcic_cpl_dat_q_unc_err_cnt),
4248[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_cpl_hd_q_unc_err_cnt),
4251[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_post_dat_q_unc_err_cnt),
4254[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_post_hd_q_unc_err_cnt),
4257[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_retry_sot_mem_unc_err_cnt),
4260[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_retry_mem_unc_err),
4263[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_n_post_dat_q_parity_err_cnt),
4266[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_pcic_n_post_h_q_parity_err_cnt),
4269[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_pcic_cpl_dat_q_cor_err_cnt),
4272[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_pcic_cpl_hd_q_cor_err_cnt),
4275[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_pcic_post_dat_q_cor_err_cnt),
4278[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_pcic_post_hd_q_cor_err_cnt),
4281[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4282 CNTR_NORMAL,
4283 access_pcic_retry_sot_mem_cor_err_cnt),
4284[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4285 CNTR_NORMAL,
4286 access_pcic_retry_mem_cor_err_cnt),
4287[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4288 "CceCli1AsyncFifoDbgParityError", 0, 0,
4289 CNTR_NORMAL,
4290 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4291[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4292 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4293 CNTR_NORMAL,
4294 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4295 ),
4296[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4297 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4298 CNTR_NORMAL,
4299 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4300[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4301 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4304[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4305 0, CNTR_NORMAL,
4306 access_cce_cli2_async_fifo_parity_err_cnt),
4307[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_cce_csr_cfg_bus_parity_err_cnt),
4310[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4311 0, CNTR_NORMAL,
4312 access_cce_cli0_async_fifo_parity_err_cnt),
4313[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_cce_rspd_data_parity_err_cnt),
4316[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_cce_trgt_access_err_cnt),
4319[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4320 0, CNTR_NORMAL,
4321 access_cce_trgt_async_fifo_parity_err_cnt),
4322[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4323 CNTR_NORMAL,
4324 access_cce_csr_write_bad_addr_err_cnt),
4325[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_cce_csr_read_bad_addr_err_cnt),
4328[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_ccs_csr_parity_err_cnt),
4331
4332/* RcvErrStatus */
4333[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_csr_parity_err_cnt),
4336[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_csr_write_bad_addr_err_cnt),
4339[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_csr_read_bad_addr_err_cnt),
4342[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_dma_csr_unc_err_cnt),
4345[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_dma_dq_fsm_encoding_err_cnt),
4348[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_dma_eq_fsm_encoding_err_cnt),
4351[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_dma_csr_parity_err_cnt),
4354[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_rbuf_data_cor_err_cnt),
4357[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_rbuf_data_unc_err_cnt),
4360[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_dma_data_fifo_rd_cor_err_cnt),
4363[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_dma_data_fifo_rd_unc_err_cnt),
4366[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4369[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4372[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_rbuf_desc_part2_cor_err_cnt),
4375[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_rbuf_desc_part2_unc_err_cnt),
4378[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_rbuf_desc_part1_cor_err_cnt),
4381[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_rx_rbuf_desc_part1_unc_err_cnt),
4384[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_hq_intr_fsm_err_cnt),
4387[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_hq_intr_csr_parity_err_cnt),
4390[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_lookup_csr_parity_err_cnt),
4393[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rx_lookup_rcv_array_cor_err_cnt),
4396[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_lookup_rcv_array_unc_err_cnt),
4399[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4400 0, CNTR_NORMAL,
4401 access_rx_lookup_des_part2_parity_err_cnt),
4402[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4403 0, CNTR_NORMAL,
4404 access_rx_lookup_des_part1_unc_cor_err_cnt),
4405[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_lookup_des_part1_unc_err_cnt),
4408[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_rbuf_next_free_buf_cor_err_cnt),
4411[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rx_rbuf_next_free_buf_unc_err_cnt),
4414[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4415 "RxRbufFlInitWrAddrParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4418[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4419 0, CNTR_NORMAL,
4420 access_rx_rbuf_fl_initdone_parity_err_cnt),
4421[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4422 0, CNTR_NORMAL,
4423 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4424[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4425 CNTR_NORMAL,
4426 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4427[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rx_rbuf_empty_err_cnt),
4430[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rx_rbuf_full_err_cnt),
4433[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4434 CNTR_NORMAL,
4435 access_rbuf_bad_lookup_err_cnt),
4436[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4437 CNTR_NORMAL,
4438 access_rbuf_ctx_id_parity_err_cnt),
4439[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4440 CNTR_NORMAL,
4441 access_rbuf_csr_qeopdw_parity_err_cnt),
4442[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4443 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4444 CNTR_NORMAL,
4445 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4446[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4447 "RxRbufCsrQTlPtrParityErr", 0, 0,
4448 CNTR_NORMAL,
4449 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4450[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4451 0, CNTR_NORMAL,
4452 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4453[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4454 0, CNTR_NORMAL,
4455 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4456[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4457 0, 0, CNTR_NORMAL,
4458 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4459[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4460 0, CNTR_NORMAL,
4461 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4462[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4463 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4466[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4467 0, CNTR_NORMAL,
4468 access_rx_rbuf_block_list_read_cor_err_cnt),
4469[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4470 0, CNTR_NORMAL,
4471 access_rx_rbuf_block_list_read_unc_err_cnt),
4472[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_rbuf_lookup_des_cor_err_cnt),
4475[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_rbuf_lookup_des_unc_err_cnt),
4478[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4479 "RxRbufLookupDesRegUncCorErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4482[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4485[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rbuf_free_list_cor_err_cnt),
4488[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rbuf_free_list_unc_err_cnt),
4491[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rcv_fsm_encoding_err_cnt),
4494[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_dma_flag_cor_err_cnt),
4497[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_dma_flag_unc_err_cnt),
4500[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_dc_sop_eop_parity_err_cnt),
4503[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_rcv_csr_parity_err_cnt),
4506[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rcv_qp_map_table_cor_err_cnt),
4509[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rcv_qp_map_table_unc_err_cnt),
4512[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_rx_rcv_data_cor_err_cnt),
4515[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_rcv_data_unc_err_cnt),
4518[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_rcv_hdr_cor_err_cnt),
4521[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4522 CNTR_NORMAL,
4523 access_rx_rcv_hdr_unc_err_cnt),
4524[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4525 CNTR_NORMAL,
4526 access_rx_dc_intf_parity_err_cnt),
4527[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_rx_dma_csr_cor_err_cnt),
4530/* SendPioErrStatus */
4531[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_pio_pec_sop_head_parity_err_cnt),
4534[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_pcc_sop_head_parity_err_cnt),
4537[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4538 0, 0, CNTR_NORMAL,
4539 access_pio_last_returned_cnt_parity_err_cnt),
4540[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4541 0, CNTR_NORMAL,
4542 access_pio_current_free_cnt_parity_err_cnt),
4543[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4544 CNTR_NORMAL,
4545 access_pio_reserved_31_err_cnt),
4546[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_reserved_30_err_cnt),
4549[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_ppmc_sop_len_err_cnt),
4552[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_ppmc_bqc_mem_parity_err_cnt),
4555[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_vl_fifo_parity_err_cnt),
4558[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_vlf_sop_parity_err_cnt),
4561[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_vlf_v1_len_parity_err_cnt),
4564[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_block_qw_count_parity_err_cnt),
4567[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_write_qw_valid_parity_err_cnt),
4570[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4571 CNTR_NORMAL,
4572 access_pio_state_machine_err_cnt),
4573[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_write_data_parity_err_cnt),
4576[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_host_addr_mem_cor_err_cnt),
4579[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_host_addr_mem_unc_err_cnt),
4582[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4585[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_init_sm_in_err_cnt),
4588[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_ppmc_pbl_fifo_err_cnt),
4591[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4592 0, CNTR_NORMAL,
4593 access_pio_credit_ret_fifo_parity_err_cnt),
4594[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_v1_len_mem_bank1_cor_err_cnt),
4597[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_v1_len_mem_bank0_cor_err_cnt),
4600[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_v1_len_mem_bank1_unc_err_cnt),
4603[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_v1_len_mem_bank0_unc_err_cnt),
4606[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_sm_pkt_reset_parity_err_cnt),
4609[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_pkt_evict_fifo_parity_err_cnt),
4612[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4613 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4616[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_sbrdctl_crrel_parity_err_cnt),
4619[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_pec_fifo_parity_err_cnt),
4622[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_pio_pcc_fifo_parity_err_cnt),
4625[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_sb_mem_fifo1_err_cnt),
4628[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_sb_mem_fifo0_err_cnt),
4631[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4632 CNTR_NORMAL,
4633 access_pio_csr_parity_err_cnt),
4634[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_pio_write_addr_parity_err_cnt),
4637[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4638 CNTR_NORMAL,
4639 access_pio_write_bad_ctxt_err_cnt),
4640/* SendDmaErrStatus */
4641[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4642 0, CNTR_NORMAL,
4643 access_sdma_pcie_req_tracking_cor_err_cnt),
4644[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4645 0, CNTR_NORMAL,
4646 access_sdma_pcie_req_tracking_unc_err_cnt),
4647[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_sdma_csr_parity_err_cnt),
4650[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_sdma_rpy_tag_err_cnt),
4653/* SendEgressErrStatus */
4654[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4655 CNTR_NORMAL,
4656 access_tx_read_pio_memory_csr_unc_err_cnt),
4657[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4658 0, CNTR_NORMAL,
4659 access_tx_read_sdma_memory_csr_err_cnt),
4660[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_tx_egress_fifo_cor_err_cnt),
4663[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_read_pio_memory_cor_err_cnt),
4666[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_read_sdma_memory_cor_err_cnt),
4669[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_sb_hdr_cor_err_cnt),
4672[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_credit_overrun_err_cnt),
4675[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_launch_fifo8_cor_err_cnt),
4678[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo7_cor_err_cnt),
4681[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_launch_fifo6_cor_err_cnt),
4684[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_launch_fifo5_cor_err_cnt),
4687[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_launch_fifo4_cor_err_cnt),
4690[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_launch_fifo3_cor_err_cnt),
4693[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_launch_fifo2_cor_err_cnt),
4696[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_launch_fifo1_cor_err_cnt),
4699[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_tx_launch_fifo0_cor_err_cnt),
4702[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4703 CNTR_NORMAL,
4704 access_tx_credit_return_vl_err_cnt),
4705[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4706 CNTR_NORMAL,
4707 access_tx_hcrc_insertion_err_cnt),
4708[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4709 CNTR_NORMAL,
4710 access_tx_egress_fifo_unc_err_cnt),
4711[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4712 CNTR_NORMAL,
4713 access_tx_read_pio_memory_unc_err_cnt),
4714[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4715 CNTR_NORMAL,
4716 access_tx_read_sdma_memory_unc_err_cnt),
4717[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4718 CNTR_NORMAL,
4719 access_tx_sb_hdr_unc_err_cnt),
4720[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4721 CNTR_NORMAL,
4722 access_tx_credit_return_partiy_err_cnt),
4723[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4726[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4729[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4732[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4735[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4738[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4741[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4744[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4747[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4750[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_sdma15_disallowed_packet_err_cnt),
4753[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma14_disallowed_packet_err_cnt),
4756[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma13_disallowed_packet_err_cnt),
4759[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma12_disallowed_packet_err_cnt),
4762[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma11_disallowed_packet_err_cnt),
4765[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma10_disallowed_packet_err_cnt),
4768[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma9_disallowed_packet_err_cnt),
4771[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma8_disallowed_packet_err_cnt),
4774[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma7_disallowed_packet_err_cnt),
4777[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4778 0, 0, CNTR_NORMAL,
4779 access_tx_sdma6_disallowed_packet_err_cnt),
4780[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4781 0, 0, CNTR_NORMAL,
4782 access_tx_sdma5_disallowed_packet_err_cnt),
4783[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4784 0, 0, CNTR_NORMAL,
4785 access_tx_sdma4_disallowed_packet_err_cnt),
4786[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_sdma3_disallowed_packet_err_cnt),
4789[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4790 0, 0, CNTR_NORMAL,
4791 access_tx_sdma2_disallowed_packet_err_cnt),
4792[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4793 0, 0, CNTR_NORMAL,
4794 access_tx_sdma1_disallowed_packet_err_cnt),
4795[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4796 0, 0, CNTR_NORMAL,
4797 access_tx_sdma0_disallowed_packet_err_cnt),
4798[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4799 CNTR_NORMAL,
4800 access_tx_config_parity_err_cnt),
4801[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4802 CNTR_NORMAL,
4803 access_tx_sbrd_ctl_csr_parity_err_cnt),
4804[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4805 CNTR_NORMAL,
4806 access_tx_launch_csr_parity_err_cnt),
4807[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4808 CNTR_NORMAL,
4809 access_tx_illegal_vl_err_cnt),
4810[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4811 "TxSbrdCtlStateMachineParityErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4814[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4815 CNTR_NORMAL,
4816 access_egress_reserved_10_err_cnt),
4817[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_9_err_cnt),
4820[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4821 0, 0, CNTR_NORMAL,
4822 access_tx_sdma_launch_intf_parity_err_cnt),
4823[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_pio_launch_intf_parity_err_cnt),
4826[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4827 CNTR_NORMAL,
4828 access_egress_reserved_6_err_cnt),
4829[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_incorrect_link_state_err_cnt),
4832[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_linkdown_err_cnt),
4835[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4836 "EgressFifoUnderrunOrParityErr", 0, 0,
4837 CNTR_NORMAL,
4838 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4839[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4840 CNTR_NORMAL,
4841 access_egress_reserved_2_err_cnt),
4842[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4843 CNTR_NORMAL,
4844 access_tx_pkt_integrity_mem_unc_err_cnt),
4845[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4846 CNTR_NORMAL,
4847 access_tx_pkt_integrity_mem_cor_err_cnt),
4848/* SendErrStatus */
4849[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4850 CNTR_NORMAL,
4851 access_send_csr_write_bad_addr_err_cnt),
4852[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4853 CNTR_NORMAL,
4854 access_send_csr_read_bad_addr_err_cnt),
4855[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4856 CNTR_NORMAL,
4857 access_send_csr_parity_cnt),
4858/* SendCtxtErrStatus */
4859[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_pio_write_out_of_bounds_err_cnt),
4862[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_pio_write_overflow_err_cnt),
4865[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4866 0, 0, CNTR_NORMAL,
4867 access_pio_write_crosses_boundary_err_cnt),
4868[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4869 CNTR_NORMAL,
4870 access_pio_disallowed_packet_err_cnt),
4871[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_pio_inconsistent_sop_err_cnt),
4874/* SendDmaEngErrStatus */
4875[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4876 0, 0, CNTR_NORMAL,
4877 access_sdma_header_request_fifo_cor_err_cnt),
4878[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_sdma_header_storage_cor_err_cnt),
4881[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_sdma_packet_tracking_cor_err_cnt),
4884[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_assembly_cor_err_cnt),
4887[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_desc_table_cor_err_cnt),
4890[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4891 0, 0, CNTR_NORMAL,
4892 access_sdma_header_request_fifo_unc_err_cnt),
4893[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_sdma_header_storage_unc_err_cnt),
4896[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_packet_tracking_unc_err_cnt),
4899[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_assembly_unc_err_cnt),
4902[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_desc_table_unc_err_cnt),
4905[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_timeout_err_cnt),
4908[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_header_length_err_cnt),
4911[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_header_address_err_cnt),
4914[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_header_select_err_cnt),
4917[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_reserved_9_err_cnt),
4920[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_packet_desc_overflow_err_cnt),
4923[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_length_mismatch_err_cnt),
4926[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4927 CNTR_NORMAL,
4928 access_sdma_halt_err_cnt),
4929[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4930 CNTR_NORMAL,
4931 access_sdma_mem_read_err_cnt),
4932[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_sdma_first_desc_err_cnt),
4935[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4936 CNTR_NORMAL,
4937 access_sdma_tail_out_of_bounds_err_cnt),
4938[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4939 CNTR_NORMAL,
4940 access_sdma_too_long_err_cnt),
4941[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4942 CNTR_NORMAL,
4943 access_sdma_gen_mismatch_err_cnt),
4944[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4945 CNTR_NORMAL,
4946 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004947};
4948
4949static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4950[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4951 CNTR_NORMAL),
4952[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4953 CNTR_NORMAL),
4954[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4955 CNTR_NORMAL),
4956[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4957 CNTR_NORMAL),
4958[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4959 CNTR_NORMAL),
4960[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4961 CNTR_NORMAL),
4962[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4963 CNTR_NORMAL),
4964[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4965[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4966[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4967[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4968 CNTR_SYNTH | CNTR_VL),
4969[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4970 CNTR_SYNTH | CNTR_VL),
4971[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4972 CNTR_SYNTH | CNTR_VL),
4973[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4974[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4975[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4976 access_sw_link_dn_cnt),
4977[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4978 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004979[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4980 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004981[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4982 access_sw_xmit_discards),
4983[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4984 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4985 access_sw_xmit_discards),
4986[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4987 access_xmit_constraint_errs),
4988[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4989 access_rcv_constraint_errs),
4990[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4991[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4992[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4993[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4994[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4995[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4996[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4997[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4998[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4999[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5000[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5001[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5002[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5003 access_sw_cpu_rc_acks),
5004[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5005 access_sw_cpu_rc_qacks),
5006[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5007 access_sw_cpu_rc_delayed_comp),
5008[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5009[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5010[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5011[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5012[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5013[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5014[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5015[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5016[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5017[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5018[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5019[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5020[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5021[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5022[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5023[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5024[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5025[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5026[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5027[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5028[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5029[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5030[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5031[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5032[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5033[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5034[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5035[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5036[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5037[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5038[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5039[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5040[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5041[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5042[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5043[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5044[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5045[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5046[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5047[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5048[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5049[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5050[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5051[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5052[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5053[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5054[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5055[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5056[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5057[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5058[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5059[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5060[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5061[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5062[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5063[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5064[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5065[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5066[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5067[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5068[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5069[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5070[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5071[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5072[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5073[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5074[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5075[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5076[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5077[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5078[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5079[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5080[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5081[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5082[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5083[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5084[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5085[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5086[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5087[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5088};
5089
5090/* ======================================================================== */
5091
Mike Marciniszyn77241052015-07-30 15:17:43 -04005092/* return true if this is chip revision a */
5093int is_ax(struct hfi1_devdata *dd)
5094{
5095 u8 chip_rev_minor =
5096 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5097 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5098 return (chip_rev_minor & 0xf0) == 0;
5099}
5100
 5101/* return true if this is chip revision b */
5102int is_bx(struct hfi1_devdata *dd)
5103{
5104 u8 chip_rev_minor =
5105 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5106 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005107 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005108}
5109
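/*
 * Illustrative sketch only, not used by the driver: is_ax() and is_bx()
 * above both key off the upper nibble of the minor chip revision
 * (0x0x = A-step, 0x1x = B-step).  The 'A' + nibble mapping below is an
 * assumption made purely for illustration.
 */
static inline char example_chip_stepping(struct hfi1_devdata *dd)
{
	u8 chip_rev_minor =
		dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
			& CCE_REVISION_CHIP_REV_MINOR_MASK;

	/* 0x0x -> 'A', 0x1x -> 'B', and so on */
	return 'A' + ((chip_rev_minor >> 4) & 0xf);
}
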
5110/*
 5111 * Append string s to buffer buf. Arguments curp and lenp are the current
5112 * position and remaining length, respectively.
5113 *
5114 * return 0 on success, 1 on out of room
5115 */
5116static int append_str(char *buf, char **curp, int *lenp, const char *s)
5117{
5118 char *p = *curp;
5119 int len = *lenp;
5120 int result = 0; /* success */
5121 char c;
5122
 5123	/* add a comma, if not the first entry in the buffer */
5124 if (p != buf) {
5125 if (len == 0) {
5126 result = 1; /* out of room */
5127 goto done;
5128 }
5129 *p++ = ',';
5130 len--;
5131 }
5132
5133 /* copy the string */
5134 while ((c = *s++) != 0) {
5135 if (len == 0) {
5136 result = 1; /* out of room */
5137 goto done;
5138 }
5139 *p++ = c;
5140 len--;
5141 }
5142
5143done:
5144 /* write return values */
5145 *curp = p;
5146 *lenp = len;
5147
5148 return result;
5149}
5150
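/*
 * Worked example (illustrative): starting with an empty buffer, the calls
 * append_str(buf, &p, &len, "ErrA") and then append_str(buf, &p, &len,
 * "ErrB") leave "ErrA,ErrB" in buf -- the comma is only added once the
 * buffer is non-empty.  A return of 1 means the remaining length reached
 * zero before the string was fully copied.
 */
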
5151/*
5152 * Using the given flag table, print a comma separated string into
5153 * the buffer. End in '*' if the buffer is too short.
5154 */
5155static char *flag_string(char *buf, int buf_len, u64 flags,
5156 struct flag_table *table, int table_size)
5157{
5158 char extra[32];
5159 char *p = buf;
5160 int len = buf_len;
5161 int no_room = 0;
5162 int i;
5163
 5164	/* make sure there are at least 2 bytes so we can form "*" */
5165 if (len < 2)
5166 return "";
5167
5168 len--; /* leave room for a nul */
5169 for (i = 0; i < table_size; i++) {
5170 if (flags & table[i].flag) {
5171 no_room = append_str(buf, &p, &len, table[i].str);
5172 if (no_room)
5173 break;
5174 flags &= ~table[i].flag;
5175 }
5176 }
5177
5178 /* any undocumented bits left? */
5179 if (!no_room && flags) {
5180 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5181 no_room = append_str(buf, &p, &len, extra);
5182 }
5183
5184 /* add * if ran out of room */
5185 if (no_room) {
5186 /* may need to back up to add space for a '*' */
5187 if (len == 0)
5188 --p;
5189 *p++ = '*';
5190 }
5191
5192 /* add final nul - space already allocated above */
5193 *p = 0;
5194 return buf;
5195}
5196
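/*
 * Worked example (illustrative): given a hypothetical table containing
 * { 0x1, "ErrA" } and { 0x4, "ErrB" }, flags == 0x7 decodes to
 * "ErrA,ErrB,bits 0x2" -- named bits first, then any undocumented
 * remainder.  If the buffer is too small the result is terminated
 * with '*'.
 */
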
5197/* first 8 CCE error interrupt source names */
5198static const char * const cce_misc_names[] = {
5199 "CceErrInt", /* 0 */
5200 "RxeErrInt", /* 1 */
5201 "MiscErrInt", /* 2 */
5202 "Reserved3", /* 3 */
5203 "PioErrInt", /* 4 */
5204 "SDmaErrInt", /* 5 */
5205 "EgressErrInt", /* 6 */
5206 "TxeErrInt" /* 7 */
5207};
5208
5209/*
5210 * Return the miscellaneous error interrupt name.
5211 */
5212static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5213{
5214 if (source < ARRAY_SIZE(cce_misc_names))
5215 strncpy(buf, cce_misc_names[source], bsize);
5216 else
5217 snprintf(buf,
5218 bsize,
5219 "Reserved%u",
5220 source + IS_GENERAL_ERR_START);
5221
5222 return buf;
5223}
5224
5225/*
5226 * Return the SDMA engine error interrupt name.
5227 */
5228static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5229{
5230 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5231 return buf;
5232}
5233
5234/*
5235 * Return the send context error interrupt name.
5236 */
5237static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5238{
5239 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5240 return buf;
5241}
5242
5243static const char * const various_names[] = {
5244 "PbcInt",
5245 "GpioAssertInt",
5246 "Qsfp1Int",
5247 "Qsfp2Int",
5248 "TCritInt"
5249};
5250
5251/*
5252 * Return the various interrupt name.
5253 */
5254static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5255{
5256 if (source < ARRAY_SIZE(various_names))
5257 strncpy(buf, various_names[source], bsize);
5258 else
Jubin John8638b772016-02-14 20:19:24 -08005259 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005260 return buf;
5261}
5262
5263/*
5264 * Return the DC interrupt name.
5265 */
5266static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5267{
5268 static const char * const dc_int_names[] = {
5269 "common",
5270 "lcb",
5271 "8051",
5272 "lbm" /* local block merge */
5273 };
5274
5275 if (source < ARRAY_SIZE(dc_int_names))
5276 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5277 else
5278 snprintf(buf, bsize, "DCInt%u", source);
5279 return buf;
5280}
5281
5282static const char * const sdma_int_names[] = {
5283 "SDmaInt",
5284 "SdmaIdleInt",
5285 "SdmaProgressInt",
5286};
5287
5288/*
5289 * Return the SDMA engine interrupt name.
5290 */
5291static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5292{
5293 /* what interrupt */
5294 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5295 /* which engine */
5296 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5297
5298 if (likely(what < 3))
5299 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5300 else
5301 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5302 return buf;
5303}
5304
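/*
 * Worked example (illustrative, assuming TXE_NUM_SDMA_ENGINES == 16):
 * source 3 decodes to "SDmaInt3", source 17 to "SdmaIdleInt1", and
 * source 33 to "SdmaProgressInt1".
 */
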
5305/*
5306 * Return the receive available interrupt name.
5307 */
5308static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5309{
5310 snprintf(buf, bsize, "RcvAvailInt%u", source);
5311 return buf;
5312}
5313
5314/*
5315 * Return the receive urgent interrupt name.
5316 */
5317static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5318{
5319 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5320 return buf;
5321}
5322
5323/*
5324 * Return the send credit interrupt name.
5325 */
5326static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5327{
5328 snprintf(buf, bsize, "SendCreditInt%u", source);
5329 return buf;
5330}
5331
5332/*
5333 * Return the reserved interrupt name.
5334 */
5335static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5336{
5337 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5338 return buf;
5339}
5340
5341static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5342{
5343 return flag_string(buf, buf_len, flags,
5344 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5345}
5346
5347static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5348{
5349 return flag_string(buf, buf_len, flags,
5350 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5351}
5352
5353static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5354{
5355 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5356 ARRAY_SIZE(misc_err_status_flags));
5357}
5358
5359static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5360{
5361 return flag_string(buf, buf_len, flags,
5362 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5363}
5364
5365static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366{
5367 return flag_string(buf, buf_len, flags,
5368 sdma_err_status_flags,
5369 ARRAY_SIZE(sdma_err_status_flags));
5370}
5371
5372static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373{
5374 return flag_string(buf, buf_len, flags,
5375 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5376}
5377
5378static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5379{
5380 return flag_string(buf, buf_len, flags,
5381 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5382}
5383
5384static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5385{
5386 return flag_string(buf, buf_len, flags,
5387 send_err_status_flags,
5388 ARRAY_SIZE(send_err_status_flags));
5389}
5390
5391static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5392{
5393 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005394 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005395
5396 /*
 5397	 * For most of these errors, there is nothing that can be done except
5398 * report or record it.
5399 */
5400 dd_dev_info(dd, "CCE Error: %s\n",
5401 cce_err_status_string(buf, sizeof(buf), reg));
5402
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005403 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5404 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005405 /* this error requires a manual drop into SPC freeze mode */
5406 /* then a fix up */
5407 start_freeze_handling(dd->pport, FREEZE_SELF);
5408 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005409
5410 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5411 if (reg & (1ull << i)) {
5412 incr_cntr64(&dd->cce_err_status_cnt[i]);
5413 /* maintain a counter over all cce_err_status errors */
5414 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5415 }
5416 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005417}
5418
5419/*
5420 * Check counters for receive errors that do not have an interrupt
5421 * associated with them.
5422 */
5423#define RCVERR_CHECK_TIME 10
5424static void update_rcverr_timer(unsigned long opaque)
5425{
5426 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5427 struct hfi1_pportdata *ppd = dd->pport;
5428 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5429
5430 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5431 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5432 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5433 set_link_down_reason(ppd,
5434 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5435 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5436 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5437 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005438 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005439
5440 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5441}
5442
5443static int init_rcverr(struct hfi1_devdata *dd)
5444{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305445 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005446 /* Assume the hardware counter has been reset */
5447 dd->rcv_ovfl_cnt = 0;
5448 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5449}
5450
5451static void free_rcverr(struct hfi1_devdata *dd)
5452{
5453 if (dd->rcverr_timer.data)
5454 del_timer_sync(&dd->rcverr_timer);
5455 dd->rcverr_timer.data = 0;
5456}
5457
5458static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5459{
5460 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005461 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005462
5463 dd_dev_info(dd, "Receive Error: %s\n",
5464 rxe_err_status_string(buf, sizeof(buf), reg));
5465
5466 if (reg & ALL_RXE_FREEZE_ERR) {
5467 int flags = 0;
5468
5469 /*
5470 * Freeze mode recovery is disabled for the errors
5471 * in RXE_FREEZE_ABORT_MASK
5472 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005473 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005474 flags = FREEZE_ABORT;
5475
5476 start_freeze_handling(dd->pport, flags);
5477 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005478
5479 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5480 if (reg & (1ull << i))
5481 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5482 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005483}
5484
5485static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5486{
5487 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005488 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005489
5490 dd_dev_info(dd, "Misc Error: %s",
5491 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005492 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5493 if (reg & (1ull << i))
5494 incr_cntr64(&dd->misc_err_status_cnt[i]);
5495 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005496}
5497
5498static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5499{
5500 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005501 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005502
5503 dd_dev_info(dd, "PIO Error: %s\n",
5504 pio_err_status_string(buf, sizeof(buf), reg));
5505
5506 if (reg & ALL_PIO_FREEZE_ERR)
5507 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005508
5509 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5510 if (reg & (1ull << i))
5511 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5512 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005513}
5514
5515static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5516{
5517 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005518 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005519
5520 dd_dev_info(dd, "SDMA Error: %s\n",
5521 sdma_err_status_string(buf, sizeof(buf), reg));
5522
5523 if (reg & ALL_SDMA_FREEZE_ERR)
5524 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005525
5526 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5527 if (reg & (1ull << i))
5528 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5529 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530}
5531
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005532static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5533{
5534 incr_cntr64(&ppd->port_xmit_discards);
5535}
5536
Mike Marciniszyn77241052015-07-30 15:17:43 -04005537static void count_port_inactive(struct hfi1_devdata *dd)
5538{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005539 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005540}
5541
5542/*
5543 * We have had a "disallowed packet" error during egress. Determine the
5544 * integrity check which failed, and update relevant error counter, etc.
5545 *
5546 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5547 * bit of state per integrity check, and so we can miss the reason for an
5548 * egress error if more than one packet fails the same integrity check
5549 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5550 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005551static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5552 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005553{
5554 struct hfi1_pportdata *ppd = dd->pport;
5555 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5556 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5557 char buf[96];
5558
5559 /* clear down all observed info as quickly as possible after read */
5560 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5561
5562 dd_dev_info(dd,
5563 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5564 info, egress_err_info_string(buf, sizeof(buf), info), src);
5565
5566 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005567 if (info & PORT_DISCARD_EGRESS_ERRS) {
5568 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005569
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005570 /*
5571 * Count all, in case multiple bits are set. Reminder:
5572 * since there is only one info register for many sources,
5573 * these may be attributed to the wrong VL if they occur
5574 * too close together.
5575 */
5576 weight = hweight64(info);
5577 for (i = 0; i < weight; i++) {
5578 __count_port_discards(ppd);
5579 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5580 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5581 else if (vl == 15)
5582 incr_cntr64(&ppd->port_xmit_discards_vl
5583 [C_VL_15]);
5584 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005585 }
5586}
5587
5588/*
5589 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5590 * register. Does it represent a 'port inactive' error?
5591 */
5592static inline int port_inactive_err(u64 posn)
5593{
5594 return (posn >= SEES(TX_LINKDOWN) &&
5595 posn <= SEES(TX_INCORRECT_LINK_STATE));
5596}
5597
5598/*
5599 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5600 * register. Does it represent a 'disallowed packet' error?
5601 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005602static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005603{
5604 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5605 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5606}
5607
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005608/*
5609 * Input value is a bit position of one of the SDMA engine disallowed
5610 * packet errors. Return which engine. Use of this must be guarded by
5611 * disallowed_pkt_err().
5612 */
5613static inline int disallowed_pkt_engine(int posn)
5614{
5615 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5616}
5617
5618/*
 5619 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5620 * be done.
5621 */
5622static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5623{
5624 struct sdma_vl_map *m;
5625 int vl;
5626
5627 /* range check */
5628 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5629 return -1;
5630
5631 rcu_read_lock();
5632 m = rcu_dereference(dd->sdma_map);
5633 vl = m->engine_to_vl[engine];
5634 rcu_read_unlock();
5635
5636 return vl;
5637}
5638
5639/*
 5640 * Translate the send context (software index) into a VL. Return -1 if the
5641 * translation cannot be done.
5642 */
5643static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5644{
5645 struct send_context_info *sci;
5646 struct send_context *sc;
5647 int i;
5648
5649 sci = &dd->send_contexts[sw_index];
5650
5651 /* there is no information for user (PSM) and ack contexts */
5652 if (sci->type != SC_KERNEL)
5653 return -1;
5654
5655 sc = sci->sc;
5656 if (!sc)
5657 return -1;
5658 if (dd->vld[15].sc == sc)
5659 return 15;
5660 for (i = 0; i < num_vls; i++)
5661 if (dd->vld[i].sc == sc)
5662 return i;
5663
5664 return -1;
5665}
5666
Mike Marciniszyn77241052015-07-30 15:17:43 -04005667static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5668{
5669 u64 reg_copy = reg, handled = 0;
5670 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005671 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005672
5673 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5674 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005675 else if (is_ax(dd) &&
5676 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5677 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005678 start_freeze_handling(dd->pport, 0);
5679
5680 while (reg_copy) {
5681 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005682		/* fls64() returns a 1-based offset; we want it zero-based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005683 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005684 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005685
5686 if (port_inactive_err(shift)) {
5687 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005688 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005689 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005690 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5691
5692 handle_send_egress_err_info(dd, vl);
5693 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005694 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005695 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005696 }
5697
5698 reg &= ~handled;
5699
5700 if (reg)
5701 dd_dev_info(dd, "Egress Error: %s\n",
5702 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005703
5704 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5705 if (reg & (1ull << i))
5706 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5707 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005708}
5709
5710static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5711{
5712 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005713 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005714
5715 dd_dev_info(dd, "Send Error: %s\n",
5716 send_err_status_string(buf, sizeof(buf), reg));
5717
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005718 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5719 if (reg & (1ull << i))
5720 incr_cntr64(&dd->send_err_status_cnt[i]);
5721 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005722}
5723
5724/*
5725 * The maximum number of times the error clear down will loop before
5726 * blocking a repeating error. This value is arbitrary.
5727 */
5728#define MAX_CLEAR_COUNT 20
5729
5730/*
5731 * Clear and handle an error register. All error interrupts are funneled
5732 * through here to have a central location to correctly handle single-
5733 * or multi-shot errors.
5734 *
5735 * For non per-context registers, call this routine with a context value
5736 * of 0 so the per-context offset is zero.
5737 *
5738 * If the handler loops too many times, assume that something is wrong
5739 * and can't be fixed, so mask the error bits.
5740 */
5741static void interrupt_clear_down(struct hfi1_devdata *dd,
5742 u32 context,
5743 const struct err_reg_info *eri)
5744{
5745 u64 reg;
5746 u32 count;
5747
5748 /* read in a loop until no more errors are seen */
5749 count = 0;
5750 while (1) {
5751 reg = read_kctxt_csr(dd, context, eri->status);
5752 if (reg == 0)
5753 break;
5754 write_kctxt_csr(dd, context, eri->clear, reg);
5755 if (likely(eri->handler))
5756 eri->handler(dd, context, reg);
5757 count++;
5758 if (count > MAX_CLEAR_COUNT) {
5759 u64 mask;
5760
5761 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5762 eri->desc, reg);
5763 /*
5764 * Read-modify-write so any other masked bits
5765 * remain masked.
5766 */
5767 mask = read_kctxt_csr(dd, context, eri->mask);
5768 mask &= ~reg;
5769 write_kctxt_csr(dd, context, eri->mask, mask);
5770 break;
5771 }
5772 }
5773}
5774
5775/*
5776 * CCE block "misc" interrupt. Source is < 16.
5777 */
5778static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5779{
5780 const struct err_reg_info *eri = &misc_errs[source];
5781
5782 if (eri->handler) {
5783 interrupt_clear_down(dd, 0, eri);
5784 } else {
5785 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5786 source);
5787 }
5788}
5789
5790static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5791{
5792 return flag_string(buf, buf_len, flags,
5793 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5794}
5795
5796/*
5797 * Send context error interrupt. Source (hw_context) is < 160.
5798 *
5799 * All send context errors cause the send context to halt. The normal
5800 * clear-down mechanism cannot be used because we cannot clear the
5801 * error bits until several other long-running items are done first.
5802 * This is OK because with the context halted, nothing else is going
5803 * to happen on it anyway.
5804 */
5805static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5806 unsigned int hw_context)
5807{
5808 struct send_context_info *sci;
5809 struct send_context *sc;
5810 char flags[96];
5811 u64 status;
5812 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005813 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005814
5815 sw_index = dd->hw_to_sw[hw_context];
5816 if (sw_index >= dd->num_send_contexts) {
5817 dd_dev_err(dd,
5818 "out of range sw index %u for send context %u\n",
5819 sw_index, hw_context);
5820 return;
5821 }
5822 sci = &dd->send_contexts[sw_index];
5823 sc = sci->sc;
5824 if (!sc) {
5825 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5826 sw_index, hw_context);
5827 return;
5828 }
5829
5830 /* tell the software that a halt has begun */
5831 sc_stop(sc, SCF_HALTED);
5832
5833 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5834
5835 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5836 send_context_err_status_string(flags, sizeof(flags), status));
5837
5838 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005839 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005840
5841 /*
5842 * Automatically restart halted kernel contexts out of interrupt
5843 * context. User contexts must ask the driver to restart the context.
5844 */
5845 if (sc->type != SC_USER)
5846 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005847
5848 /*
5849 * Update the counters for the corresponding status bits.
5850 * Note that these particular counters are aggregated over all
5851 * 160 contexts.
5852 */
5853 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5854 if (status & (1ull << i))
5855 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5856 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005857}
5858
5859static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5860 unsigned int source, u64 status)
5861{
5862 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005863 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005864
5865 sde = &dd->per_sdma[source];
5866#ifdef CONFIG_SDMA_VERBOSITY
5867 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5868 slashstrip(__FILE__), __LINE__, __func__);
5869 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5870 sde->this_idx, source, (unsigned long long)status);
5871#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005872 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005873 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005874
5875 /*
5876 * Update the counters for the corresponding status bits.
5877 * Note that these particular counters are aggregated over
5878 * all 16 DMA engines.
5879 */
5880 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5881 if (status & (1ull << i))
5882 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5883 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005884}
5885
5886/*
5887 * CCE block SDMA error interrupt. Source is < 16.
5888 */
5889static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5890{
5891#ifdef CONFIG_SDMA_VERBOSITY
5892 struct sdma_engine *sde = &dd->per_sdma[source];
5893
5894 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5895 slashstrip(__FILE__), __LINE__, __func__);
5896 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5897 source);
5898 sdma_dumpstate(sde);
5899#endif
5900 interrupt_clear_down(dd, source, &sdma_eng_err);
5901}
5902
5903/*
5904 * CCE block "various" interrupt. Source is < 8.
5905 */
5906static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5907{
5908 const struct err_reg_info *eri = &various_err[source];
5909
5910 /*
5911 * TCritInt cannot go through interrupt_clear_down()
5912 * because it is not a second tier interrupt. The handler
5913 * should be called directly.
5914 */
5915 if (source == TCRIT_INT_SOURCE)
5916 handle_temp_err(dd);
5917 else if (eri->handler)
5918 interrupt_clear_down(dd, 0, eri);
5919 else
5920 dd_dev_info(dd,
5921 "%s: Unimplemented/reserved interrupt %d\n",
5922 __func__, source);
5923}
5924
5925static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5926{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005927 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005928 struct hfi1_pportdata *ppd = dd->pport;
5929 unsigned long flags;
5930 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5931
5932 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005933 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5934 __func__);
5935
5936 if (!qsfp_mod_present(ppd)) {
5937 ppd->driver_link_ready = 0;
5938 /*
5939 * Cable removed, reset all our information about the
5940 * cache and cable capabilities
5941 */
5942
5943 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5944 /*
5945 * We don't set cache_refresh_required here as we expect
5946 * an interrupt when a cable is inserted
5947 */
5948 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005949 ppd->qsfp_info.reset_needed = 0;
5950 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005951 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5952 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005953 /* Invert the ModPresent pin now to detect plug-in */
5954 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5955 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005956
5957 if ((ppd->offline_disabled_reason >
5958 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005959 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08005960 (ppd->offline_disabled_reason ==
5961 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5962 ppd->offline_disabled_reason =
5963 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08005964 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005965
Mike Marciniszyn77241052015-07-30 15:17:43 -04005966 if (ppd->host_link_state == HLS_DN_POLL) {
5967 /*
5968 * The link is still in POLL. This means
5969 * that the normal link down processing
5970 * will not happen. We have to do it here
5971 * before turning the DC off.
5972 */
5973 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5974 }
5975 } else {
5976 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5977 ppd->qsfp_info.cache_valid = 0;
5978 ppd->qsfp_info.cache_refresh_required = 1;
5979 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5980 flags);
5981
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005982 /*
5983 * Stop inversion of ModPresent pin to detect
5984 * removal of the cable
5985 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005986 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005987 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5988 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5989
5990 ppd->offline_disabled_reason =
5991 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005992 }
5993 }
5994
5995 if (reg & QSFP_HFI0_INT_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005996 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5997 __func__);
5998 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5999 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006000 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6001 }
6002
6003 /* Schedule the QSFP work only if there is a cable attached. */
6004 if (qsfp_mod_present(ppd))
6005 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6006}
6007
6008static int request_host_lcb_access(struct hfi1_devdata *dd)
6009{
6010 int ret;
6011
6012 ret = do_8051_command(dd, HCMD_MISC,
6013 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6014 NULL);
6015 if (ret != HCMD_SUCCESS) {
6016 dd_dev_err(dd, "%s: command failed with error %d\n",
6017 __func__, ret);
6018 }
6019 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6020}
6021
6022static int request_8051_lcb_access(struct hfi1_devdata *dd)
6023{
6024 int ret;
6025
6026 ret = do_8051_command(dd, HCMD_MISC,
6027 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6028 NULL);
6029 if (ret != HCMD_SUCCESS) {
6030 dd_dev_err(dd, "%s: command failed with error %d\n",
6031 __func__, ret);
6032 }
6033 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6034}
6035
6036/*
6037 * Set the LCB selector - allow host access. The DCC selector always
6038 * points to the host.
6039 */
6040static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6041{
6042 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6043 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
6044 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6045}
6046
6047/*
6048 * Clear the LCB selector - allow 8051 access. The DCC selector always
6049 * points to the host.
6050 */
6051static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6052{
6053 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6054 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6055}
6056
6057/*
6058 * Acquire LCB access from the 8051. If the host already has access,
6059 * just increment a counter. Otherwise, inform the 8051 that the
6060 * host is taking access.
6061 *
6062 * Returns:
6063 * 0 on success
6064 * -EBUSY if the 8051 has control and cannot be disturbed
6065 * -errno if unable to acquire access from the 8051
6066 */
6067int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6068{
6069 struct hfi1_pportdata *ppd = dd->pport;
6070 int ret = 0;
6071
6072 /*
6073 * Use the host link state lock so the operation of this routine
6074 * { link state check, selector change, count increment } can occur
6075 * as a unit against a link state change. Otherwise there is a
6076 * race between the state change and the count increment.
6077 */
6078 if (sleep_ok) {
6079 mutex_lock(&ppd->hls_lock);
6080 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006081 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006082 udelay(1);
6083 }
6084
6085 /* this access is valid only when the link is up */
6086 if ((ppd->host_link_state & HLS_UP) == 0) {
6087 dd_dev_info(dd, "%s: link state %s not up\n",
6088 __func__, link_state_name(ppd->host_link_state));
6089 ret = -EBUSY;
6090 goto done;
6091 }
6092
6093 if (dd->lcb_access_count == 0) {
6094 ret = request_host_lcb_access(dd);
6095 if (ret) {
6096 dd_dev_err(dd,
6097 "%s: unable to acquire LCB access, err %d\n",
6098 __func__, ret);
6099 goto done;
6100 }
6101 set_host_lcb_access(dd);
6102 }
6103 dd->lcb_access_count++;
6104done:
6105 mutex_unlock(&ppd->hls_lock);
6106 return ret;
6107}
6108
6109/*
6110 * Release LCB access by decrementing the use count. If the count is moving
6111 * from 1 to 0, inform 8051 that it has control back.
6112 *
6113 * Returns:
6114 * 0 on success
6115 * -errno if unable to release access to the 8051
6116 */
6117int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6118{
6119 int ret = 0;
6120
6121 /*
6122 * Use the host link state lock because the acquire needed it.
6123 * Here, we only need to keep { selector change, count decrement }
6124 * as a unit.
6125 */
6126 if (sleep_ok) {
6127 mutex_lock(&dd->pport->hls_lock);
6128 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006129 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006130 udelay(1);
6131 }
6132
6133 if (dd->lcb_access_count == 0) {
6134 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6135 __func__);
6136 goto done;
6137 }
6138
6139 if (dd->lcb_access_count == 1) {
6140 set_8051_lcb_access(dd);
6141 ret = request_8051_lcb_access(dd);
6142 if (ret) {
6143 dd_dev_err(dd,
6144 "%s: unable to release LCB access, err %d\n",
6145 __func__, ret);
6146 /* restore host access if the grant didn't work */
6147 set_host_lcb_access(dd);
6148 goto done;
6149 }
6150 }
6151 dd->lcb_access_count--;
6152done:
6153 mutex_unlock(&dd->pport->hls_lock);
6154 return ret;
6155}
6156
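/*
 * Illustrative usage sketch only, not called by the driver: direct host
 * access to LCB CSRs is expected to be bracketed by acquire_lcb_access()
 * and release_lcb_access().  The csr argument is a placeholder offset.
 */
static inline int example_read_lcb_csr(struct hfi1_devdata *dd, u32 csr,
				       u64 *data)
{
	int ret;

	ret = acquire_lcb_access(dd, 1);	/* ok to sleep */
	if (ret)
		return ret;
	*data = read_csr(dd, csr);
	return release_lcb_access(dd, 1);
}
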
6157/*
6158 * Initialize LCB access variables and state. Called during driver load,
6159 * after most of the initialization is finished.
6160 *
6161 * The DC default is LCB access on for the host. The driver defaults to
6162 * leaving access to the 8051. Assign access now - this constrains the call
6163 * to this routine to be after all LCB set-up is done. In particular, after
 6164 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6165 */
6166static void init_lcb_access(struct hfi1_devdata *dd)
6167{
6168 dd->lcb_access_count = 0;
6169}
6170
6171/*
6172 * Write a response back to a 8051 request.
 6173 * Write a response back to an 8051 request.
6174static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6175{
6176 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6177 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6178 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6179 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6180}
6181
6182/*
6183 * Handle host requests from the 8051.
6184 *
6185 * This is a work-queue function outside of the interrupt.
6186 */
6187void handle_8051_request(struct work_struct *work)
6188{
6189	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6190 dc_host_req_work);
6191 struct hfi1_devdata *dd = ppd->dd;
6192	u64 reg;
6193	u16 data = 0;
6194 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6195 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
6196
6197 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6198 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6199 return; /* no request */
6200
6201 /* zero out COMPLETED so the response is seen */
6202 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6203
6204 /* extract request details */
6205 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6206 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6207 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6208 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6209
6210 switch (type) {
6211 case HREQ_LOAD_CONFIG:
6212 case HREQ_SAVE_CONFIG:
6213 case HREQ_READ_CONFIG:
6214 case HREQ_SET_TX_EQ_ABS:
6215 case HREQ_SET_TX_EQ_REL:
6216		dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6217 type);
6218 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6219 break;
6220
6221	case HREQ_ENABLE:
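		/*
		 * Request data layout (per the decode below): lanes to act on
		 * in bits [3:0], TX CDR enable in bit 9 (0x200), RX CDR enable
		 * in bit 11 (0x800). A CDR control bit is only changed when the
		 * cached QSFP data says the module actually provides that CDR.
		 */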
6222 lanes = data & 0xF;
6223 for (i = 0; lanes; lanes >>= 1, i++) {
6224 if (!(lanes & 1))
6225 continue;
6226 if (data & 0x200) {
6227 /* enable TX CDR */
6228 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6229 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6230 cdr_ctrl_byte |= (1 << (i + 4));
6231 } else {
6232 /* disable TX CDR */
6233 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6234 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6235 cdr_ctrl_byte &= ~(1 << (i + 4));
6236 }
6237
6238 if (data & 0x800) {
6239 /* enable RX CDR */
6240 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6241 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6242 cdr_ctrl_byte |= (1 << i);
6243 } else {
6244 /* disable RX CDR */
6245 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6246 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6247 cdr_ctrl_byte &= ~(1 << i);
6248 }
6249 }
6250 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6251 &cdr_ctrl_byte, 1);
6252 hreq_response(dd, HREQ_SUCCESS, data);
6253 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6254 break;
6255
6256	case HREQ_CONFIG_DONE:
6257 hreq_response(dd, HREQ_SUCCESS, 0);
6258 break;
6259
6260 case HREQ_INTERFACE_TEST:
6261 hreq_response(dd, HREQ_SUCCESS, data);
6262 break;
6263
6264 default:
6265 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6266 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6267 break;
6268 }
6269}
6270
6271static void write_global_credit(struct hfi1_devdata *dd,
6272 u8 vau, u16 total, u16 shared)
6273{
6274 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6275 ((u64)total
6276 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6277 | ((u64)shared
6278 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6279 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6280}
6281
6282/*
6283 * Set up initial VL15 credits of the remote. Assumes the rest of
6284 * the CM credit registers are zero from a previous global or credit reset.
6285 */
6286void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6287{
6288 /* leave shared count at zero for both global and VL15 */
6289 write_global_credit(dd, vau, vl15buf, 0);
6290
6291 /* We may need some credits for another VL when sending packets
6292 * with the snoop interface. Dividing it down the middle for VL15
6293 * and VL0 should suffice.
6294 */
6295 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6296 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6297 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6298 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6299 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6300 } else {
6301 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6302 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6303 }
6304}
6305
6306/*
6307 * Zero all credit details from the previous connection and
6308 * reset the CM manager's internal counters.
6309 */
6310void reset_link_credits(struct hfi1_devdata *dd)
6311{
6312 int i;
6313
6314 /* remove all previous VL credit limits */
6315 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6316		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6317	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6318 write_global_credit(dd, 0, 0, 0);
6319 /* reset the CM block */
6320 pio_send_control(dd, PSC_CM_RESET);
6321}
6322
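/*
 * vAU/vCU are the log2-encoded forms exchanged with the peer:
 * AU = 8 * 2^vAU bytes and CU = 2^vCU (e.g. vAU = 3 -> AU = 64 bytes).
 */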
6323/* convert a vCU to a CU */
6324static u32 vcu_to_cu(u8 vcu)
6325{
6326 return 1 << vcu;
6327}
6328
6329/* convert a CU to a vCU */
6330static u8 cu_to_vcu(u32 cu)
6331{
6332 return ilog2(cu);
6333}
6334
6335/* convert a vAU to an AU */
6336static u32 vau_to_au(u8 vau)
6337{
6338 return 8 * (1 << vau);
6339}
6340
6341static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6342{
6343 ppd->sm_trap_qp = 0x0;
6344 ppd->sa_qp = 0x1;
6345}
6346
6347/*
6348 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6349 */
6350static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6351{
6352 u64 reg;
6353
6354 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6355 write_csr(dd, DC_LCB_CFG_RUN, 0);
6356 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6357 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6358 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6359 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6360 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6361 reg = read_csr(dd, DCC_CFG_RESET);
6362 write_csr(dd, DCC_CFG_RESET,
6363 reg
6364 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6365 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6366	(void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6367	if (!abort) {
6368 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6369 write_csr(dd, DCC_CFG_RESET, reg);
6370 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6371 }
6372}
6373
6374/*
6375 * This routine should be called after the link has been transitioned to
6376 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6377 * reset).
6378 *
6379 * The expectation is that the caller of this routine would have taken
6380 * care of properly transitioning the link into the correct state.
6381 */
6382static void dc_shutdown(struct hfi1_devdata *dd)
6383{
6384 unsigned long flags;
6385
6386 spin_lock_irqsave(&dd->dc8051_lock, flags);
6387 if (dd->dc_shutdown) {
6388 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6389 return;
6390 }
6391 dd->dc_shutdown = 1;
6392 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6393 /* Shutdown the LCB */
6394 lcb_shutdown(dd, 1);
6395	/* Going to OFFLINE would have caused the 8051 to put the
6396	 * SerDes into reset already. Just need to shut down the
6397	 * 8051 itself. */
6398 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6399}
6400
6401/* Calling this after the DC has been brought out of reset should not
6402 * do any damage. */
6403static void dc_start(struct hfi1_devdata *dd)
6404{
6405 unsigned long flags;
6406 int ret;
6407
6408 spin_lock_irqsave(&dd->dc8051_lock, flags);
6409 if (!dd->dc_shutdown)
6410 goto done;
6411 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6412 /* Take the 8051 out of reset */
6413 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6414 /* Wait until 8051 is ready */
6415 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6416 if (ret) {
6417 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6418 __func__);
6419 }
6420 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6421 write_csr(dd, DCC_CFG_RESET, 0x10);
6422 /* lcb_shutdown() with abort=1 does not restore these */
6423 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6424 spin_lock_irqsave(&dd->dc8051_lock, flags);
6425 dd->dc_shutdown = 0;
6426done:
6427 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6428}
6429
6430/*
6431 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6432 */
6433static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6434{
6435 u64 rx_radr, tx_radr;
6436 u32 version;
6437
6438 if (dd->icode != ICODE_FPGA_EMULATION)
6439 return;
6440
6441 /*
6442 * These LCB defaults on emulator _s are good, nothing to do here:
6443 * LCB_CFG_TX_FIFOS_RADR
6444 * LCB_CFG_RX_FIFOS_RADR
6445 * LCB_CFG_LN_DCLK
6446 * LCB_CFG_IGNORE_LOST_RCLK
6447 */
6448 if (is_emulator_s(dd))
6449 return;
6450 /* else this is _p */
6451
6452 version = emulator_rev(dd);
6453	if (!is_ax(dd))
6454		version = 0x2d; /* all B0 use 0x2d or higher settings */
6455
6456 if (version <= 0x12) {
6457 /* release 0x12 and below */
6458
6459 /*
6460 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6461 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6462 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6463 */
6464 rx_radr =
6465 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6466 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6467 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6468 /*
6469 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6470 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6471 */
6472 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6473 } else if (version <= 0x18) {
6474 /* release 0x13 up to 0x18 */
6475 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6476 rx_radr =
6477 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6478 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6479 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6480 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6481 } else if (version == 0x19) {
6482 /* release 0x19 */
6483 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6484 rx_radr =
6485 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6486 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6487 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6488 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6489 } else if (version == 0x1a) {
6490 /* release 0x1a */
6491 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6492 rx_radr =
6493 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6494 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6495 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6496 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6497 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6498 } else {
6499 /* release 0x1b and higher */
6500 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6501 rx_radr =
6502 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6503 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6504 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6505 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6506 }
6507
6508 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6509 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6510 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6511 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6512 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6513}
6514
6515/*
6516 * Handle a SMA idle message
6517 *
6518 * This is a work-queue function outside of the interrupt.
6519 */
6520void handle_sma_message(struct work_struct *work)
6521{
6522 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6523 sma_message_work);
6524 struct hfi1_devdata *dd = ppd->dd;
6525 u64 msg;
6526 int ret;
6527
6528 /* msg is bytes 1-4 of the 40-bit idle message - the command code
6529	 * is stripped off */
6530 ret = read_idle_sma(dd, &msg);
6531 if (ret)
6532 return;
6533 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6534 /*
6535 * React to the SMA message. Byte[1] (0 for us) is the command.
6536 */
6537 switch (msg & 0xff) {
6538 case SMA_IDLE_ARM:
6539 /*
6540 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6541 * State Transitions
6542 *
6543 * Only expected in INIT or ARMED, discard otherwise.
6544 */
6545 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6546 ppd->neighbor_normal = 1;
6547 break;
6548 case SMA_IDLE_ACTIVE:
6549 /*
6550 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6551 * State Transitions
6552 *
6553 * Can activate the node. Discard otherwise.
6554 */
6555		if (ppd->host_link_state == HLS_UP_ARMED &&
6556		    ppd->is_active_optimize_enabled) {
6557			ppd->neighbor_normal = 1;
6558 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6559 if (ret)
6560 dd_dev_err(
6561 dd,
6562 "%s: received Active SMA idle message, couldn't set link to Active\n",
6563 __func__);
6564 }
6565 break;
6566 default:
6567 dd_dev_err(dd,
6568 "%s: received unexpected SMA idle message 0x%llx\n",
6569 __func__, msg);
6570 break;
6571 }
6572}
6573
6574static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6575{
6576 u64 rcvctrl;
6577 unsigned long flags;
6578
6579 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6580 rcvctrl = read_csr(dd, RCV_CTRL);
6581 rcvctrl |= add;
6582 rcvctrl &= ~clear;
6583 write_csr(dd, RCV_CTRL, rcvctrl);
6584 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6585}
6586
6587static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6588{
6589 adjust_rcvctrl(dd, add, 0);
6590}
6591
6592static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6593{
6594 adjust_rcvctrl(dd, 0, clear);
6595}
6596
6597/*
6598 * Called from all interrupt handlers to start handling an SPC freeze.
6599 */
6600void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6601{
6602 struct hfi1_devdata *dd = ppd->dd;
6603 struct send_context *sc;
6604 int i;
6605
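	/* FREEZE_SELF: trigger the SPC freeze here instead of only reacting to one */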
6606 if (flags & FREEZE_SELF)
6607 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6608
6609 /* enter frozen mode */
6610 dd->flags |= HFI1_FROZEN;
6611
6612 /* notify all SDMA engines that they are going into a freeze */
6613 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6614
6615 /* do halt pre-handling on all enabled send contexts */
6616 for (i = 0; i < dd->num_send_contexts; i++) {
6617 sc = dd->send_contexts[i].sc;
6618 if (sc && (sc->flags & SCF_ENABLED))
6619 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6620 }
6621
6622	/* Send contexts are frozen. Notify user space */
6623 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6624
6625 if (flags & FREEZE_ABORT) {
6626 dd_dev_err(dd,
6627 "Aborted freeze recovery. Please REBOOT system\n");
6628 return;
6629 }
6630 /* queue non-interrupt handler */
6631 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6632}
6633
6634/*
6635 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6636 * depending on the "freeze" parameter.
6637 *
6638 * No need to return an error if it times out, our only option
6639 * is to proceed anyway.
6640 */
6641static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6642{
6643 unsigned long timeout;
6644 u64 reg;
6645
6646 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6647 while (1) {
6648 reg = read_csr(dd, CCE_STATUS);
6649 if (freeze) {
6650 /* waiting until all indicators are set */
6651 if ((reg & ALL_FROZE) == ALL_FROZE)
6652 return; /* all done */
6653 } else {
6654 /* waiting until all indicators are clear */
6655 if ((reg & ALL_FROZE) == 0)
6656 return; /* all done */
6657 }
6658
6659 if (time_after(jiffies, timeout)) {
6660 dd_dev_err(dd,
6661 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6662 freeze ? "" : "un",
6663 reg & ALL_FROZE,
6664 freeze ? ALL_FROZE : 0ull);
6665 return;
6666 }
6667 usleep_range(80, 120);
6668 }
6669}
6670
6671/*
6672 * Do all freeze handling for the RXE block.
6673 */
6674static void rxe_freeze(struct hfi1_devdata *dd)
6675{
6676 int i;
6677
6678 /* disable port */
6679 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6680
6681 /* disable all receive contexts */
6682 for (i = 0; i < dd->num_rcv_contexts; i++)
6683 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6684}
6685
6686/*
6687 * Unfreeze handling for the RXE block - kernel contexts only.
6688 * This will also enable the port. User contexts will do unfreeze
6689 * handling on a per-context basis as they call into the driver.
6690 *
6691 */
6692static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6693{
6694	u32 rcvmask;
6695	int i;
6696
6697 /* enable all kernel contexts */
6698	for (i = 0; i < dd->n_krcv_queues; i++) {
6699 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6700 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6701 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6702 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6703 hfi1_rcvctrl(dd, rcvmask, i);
6704 }
6705
6706 /* enable port */
6707 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6708}
6709
6710/*
6711 * Non-interrupt SPC freeze handling.
6712 *
6713 * This is a work-queue function outside of the triggering interrupt.
6714 */
6715void handle_freeze(struct work_struct *work)
6716{
6717 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6718 freeze_work);
6719 struct hfi1_devdata *dd = ppd->dd;
6720
6721 /* wait for freeze indicators on all affected blocks */
6722	wait_for_freeze_status(dd, 1);
6723
6724 /* SPC is now frozen */
6725
6726 /* do send PIO freeze steps */
6727 pio_freeze(dd);
6728
6729 /* do send DMA freeze steps */
6730 sdma_freeze(dd);
6731
6732 /* do send egress freeze steps - nothing to do */
6733
6734 /* do receive freeze steps */
6735 rxe_freeze(dd);
6736
6737 /*
6738 * Unfreeze the hardware - clear the freeze, wait for each
6739 * block's frozen bit to clear, then clear the frozen flag.
6740 */
6741 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6742 wait_for_freeze_status(dd, 0);
6743
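	/* A0 parts: run the freeze/unfreeze sequence a second time */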
6744	if (is_ax(dd)) {
6745		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6746 wait_for_freeze_status(dd, 1);
6747 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6748 wait_for_freeze_status(dd, 0);
6749 }
6750
6751 /* do send PIO unfreeze steps for kernel contexts */
6752 pio_kernel_unfreeze(dd);
6753
6754 /* do send DMA unfreeze steps */
6755 sdma_unfreeze(dd);
6756
6757 /* do send egress unfreeze steps - nothing to do */
6758
6759 /* do receive unfreeze steps for kernel contexts */
6760 rxe_kernel_unfreeze(dd);
6761
6762 /*
6763 * The unfreeze procedure touches global device registers when
6764 * it disables and re-enables RXE. Mark the device unfrozen
6765 * after all that is done so other parts of the driver waiting
6766 * for the device to unfreeze don't do things out of order.
6767 *
6768 * The above implies that the meaning of HFI1_FROZEN flag is
6769 * "Device has gone into freeze mode and freeze mode handling
6770 * is still in progress."
6771 *
6772 * The flag will be removed when freeze mode processing has
6773 * completed.
6774 */
6775 dd->flags &= ~HFI1_FROZEN;
6776 wake_up(&dd->event_queue);
6777
6778 /* no longer frozen */
6779}
6780
6781/*
6782 * Handle a link up interrupt from the 8051.
6783 *
6784 * This is a work-queue function outside of the interrupt.
6785 */
6786void handle_link_up(struct work_struct *work)
6787{
6788 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6789 link_up_work);
6790 set_link_state(ppd, HLS_UP_INIT);
6791
6792 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6793 read_ltp_rtt(ppd->dd);
6794 /*
6795 * OPA specifies that certain counters are cleared on a transition
6796 * to link up, so do that.
6797 */
6798 clear_linkup_counters(ppd->dd);
6799 /*
6800 * And (re)set link up default values.
6801 */
6802 set_linkup_defaults(ppd);
6803
6804 /* enforce link speed enabled */
6805 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6806 /* oops - current speed is not enabled, bounce */
6807 dd_dev_err(ppd->dd,
6808 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6809 ppd->link_speed_active, ppd->link_speed_enabled);
6810 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6811 OPA_LINKDOWN_REASON_SPEED_POLICY);
6812 set_link_state(ppd, HLS_DN_OFFLINE);
6813		tune_serdes(ppd);
6814		start_link(ppd);
6815 }
6816}
6817
6818/* Several pieces of LNI information were cached for SMA in ppd.
6819 * Reset these on link down */
6820static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6821{
6822 ppd->neighbor_guid = 0;
6823 ppd->neighbor_port_number = 0;
6824 ppd->neighbor_type = 0;
6825 ppd->neighbor_fm_security = 0;
6826}
6827
6828/*
6829 * Handle a link down interrupt from the 8051.
6830 *
6831 * This is a work-queue function outside of the interrupt.
6832 */
6833void handle_link_down(struct work_struct *work)
6834{
6835 u8 lcl_reason, neigh_reason = 0;
6836 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6837 link_down_work);
6838
6839	if ((ppd->host_link_state &
6840 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6841 ppd->port_type == PORT_TYPE_FIXED)
6842 ppd->offline_disabled_reason =
6843 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6844
6845 /* Go offline first, then deal with reading/writing through 8051 */
6846	set_link_state(ppd, HLS_DN_OFFLINE);
6847
6848 lcl_reason = 0;
6849 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6850
6851 /*
6852 * If no reason, assume peer-initiated but missed
6853 * LinkGoingDown idle flits.
6854 */
6855 if (neigh_reason == 0)
6856 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6857
6858 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6859
6860 reset_neighbor_info(ppd);
6861
6862 /* disable the port */
6863 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6864
6865 /* If there is no cable attached, turn the DC off. Otherwise,
6866 * start the link bring up. */
6867	if (!qsfp_mod_present(ppd)) {
6868		dc_shutdown(ppd->dd);
6869	} else {
6870		tune_serdes(ppd);
6871		start_link(ppd);
6872	}
6873}
6874
6875void handle_link_bounce(struct work_struct *work)
6876{
6877 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6878 link_bounce_work);
6879
6880 /*
6881 * Only do something if the link is currently up.
6882 */
6883 if (ppd->host_link_state & HLS_UP) {
6884 set_link_state(ppd, HLS_DN_OFFLINE);
6885		tune_serdes(ppd);
6886		start_link(ppd);
6887 } else {
6888 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6889 __func__, link_state_name(ppd->host_link_state));
6890 }
6891}
6892
6893/*
6894 * Mask conversion: Capability exchange to Port LTP. The capability
6895 * exchange has an implicit 16b CRC that is mandatory.
6896 */
6897static int cap_to_port_ltp(int cap)
6898{
6899 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6900
6901 if (cap & CAP_CRC_14B)
6902 port_ltp |= PORT_LTP_CRC_MODE_14;
6903 if (cap & CAP_CRC_48B)
6904 port_ltp |= PORT_LTP_CRC_MODE_48;
6905 if (cap & CAP_CRC_12B_16B_PER_LANE)
6906 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6907
6908 return port_ltp;
6909}
6910
6911/*
6912 * Convert an OPA Port LTP mask to capability mask
6913 */
6914int port_ltp_to_cap(int port_ltp)
6915{
6916 int cap_mask = 0;
6917
6918 if (port_ltp & PORT_LTP_CRC_MODE_14)
6919 cap_mask |= CAP_CRC_14B;
6920 if (port_ltp & PORT_LTP_CRC_MODE_48)
6921 cap_mask |= CAP_CRC_48B;
6922 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6923 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6924
6925 return cap_mask;
6926}
6927
6928/*
6929 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6930 */
6931static int lcb_to_port_ltp(int lcb_crc)
6932{
6933 int port_ltp = 0;
6934
6935 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6936 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6937 else if (lcb_crc == LCB_CRC_48B)
6938 port_ltp = PORT_LTP_CRC_MODE_48;
6939 else if (lcb_crc == LCB_CRC_14B)
6940 port_ltp = PORT_LTP_CRC_MODE_14;
6941 else
6942 port_ltp = PORT_LTP_CRC_MODE_16;
6943
6944 return port_ltp;
6945}
6946
6947/*
6948 * Our neighbor has indicated that we are allowed to act as a fabric
6949 * manager, so place the full management partition key in the second
6950 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6951 * that we should already have the limited management partition key in
6952 * array element 1, and also that the port is not yet up when
6953 * add_full_mgmt_pkey() is invoked.
6954 */
6955static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6956{
6957 struct hfi1_devdata *dd = ppd->dd;
6958
6959	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6960 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6961 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6962 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6963	ppd->pkeys[2] = FULL_MGMT_P_KEY;
6964 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6965}
6966
6967/*
6968 * Convert the given link width to the OPA link width bitmask.
6969 */
6970static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6971{
6972 switch (width) {
6973 case 0:
6974 /*
6975 * Simulator and quick linkup do not set the width.
6976 * Just set it to 4x without complaint.
6977 */
6978 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6979 return OPA_LINK_WIDTH_4X;
6980 return 0; /* no lanes up */
6981 case 1: return OPA_LINK_WIDTH_1X;
6982 case 2: return OPA_LINK_WIDTH_2X;
6983 case 3: return OPA_LINK_WIDTH_3X;
6984 default:
6985 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6986 __func__, width);
6987 /* fall through */
6988 case 4: return OPA_LINK_WIDTH_4X;
6989 }
6990}
6991
6992/*
6993 * Do a population count on the bottom nibble.
6994 */
6995static const u8 bit_counts[16] = {
6996 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6997};
6998static inline u8 nibble_to_count(u8 nibble)
6999{
7000 return bit_counts[nibble & 0xf];
7001}
7002
7003/*
7004 * Read the active lane information from the 8051 registers and return
7005 * their widths.
7006 *
7007 * Active lane information is found in these 8051 registers:
7008 * enable_lane_tx
7009 * enable_lane_rx
7010 */
7011static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7012 u16 *rx_width)
7013{
7014 u16 tx, rx;
7015 u8 enable_lane_rx;
7016 u8 enable_lane_tx;
7017 u8 tx_polarity_inversion;
7018 u8 rx_polarity_inversion;
7019 u8 max_rate;
7020
7021 /* read the active lanes */
7022 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7023 &rx_polarity_inversion, &max_rate);
7024 read_local_lni(dd, &enable_lane_rx);
7025
7026 /* convert to counts */
7027 tx = nibble_to_count(enable_lane_tx);
7028 rx = nibble_to_count(enable_lane_rx);
7029
7030 /*
7031 * Set link_speed_active here, overriding what was set in
7032 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7033 * set the max_rate field in handle_verify_cap until v0.19.
7034 */
7035	if ((dd->icode == ICODE_RTL_SILICON) &&
7036	    (dd->dc8051_ver < dc8051_ver(0, 19))) {
7037		/* max_rate: 0 = 12.5G, 1 = 25G */
7038 switch (max_rate) {
7039 case 0:
7040 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7041 break;
7042 default:
7043 dd_dev_err(dd,
7044 "%s: unexpected max rate %d, using 25Gb\n",
7045 __func__, (int)max_rate);
7046 /* fall through */
7047 case 1:
7048 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7049 break;
7050 }
7051 }
7052
7053 dd_dev_info(dd,
7054 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7055 enable_lane_tx, tx, enable_lane_rx, rx);
7056 *tx_width = link_width_to_bits(dd, tx);
7057 *rx_width = link_width_to_bits(dd, rx);
7058}
7059
7060/*
7061 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7062 * Valid after the end of VerifyCap and during LinkUp. Does not change
7063 * after link up. I.e. look elsewhere for downgrade information.
7064 *
7065 * Bits are:
7066 * + bits [7:4] contain the number of active transmitters
7067 * + bits [3:0] contain the number of active receivers
7068 * These are numbers 1 through 4 and can be different values if the
7069 * link is asymmetric.
7070 *
7071 * verify_cap_local_fm_link_width[0] retains its original value.
7072 */
7073static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7074 u16 *rx_width)
7075{
7076 u16 widths, tx, rx;
7077 u8 misc_bits, local_flags;
7078 u16 active_tx, active_rx;
7079
7080 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
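	/* the upper byte of widths is fm_link_width[1]: tx count in bits [15:12], rx count in [11:8] */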
7081 tx = widths >> 12;
7082 rx = (widths >> 8) & 0xf;
7083
7084 *tx_width = link_width_to_bits(dd, tx);
7085 *rx_width = link_width_to_bits(dd, rx);
7086
7087 /* print the active widths */
7088 get_link_widths(dd, &active_tx, &active_rx);
7089}
7090
7091/*
7092 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7093 * hardware information when the link first comes up.
7094 *
7095 * The link width is not available until after VerifyCap.AllFramesReceived
7096 * (the trigger for handle_verify_cap), so this is outside that routine
7097 * and should be called when the 8051 signals linkup.
7098 */
7099void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7100{
7101 u16 tx_width, rx_width;
7102
7103 /* get end-of-LNI link widths */
7104 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7105
7106 /* use tx_width as the link is supposed to be symmetric on link up */
7107 ppd->link_width_active = tx_width;
7108 /* link width downgrade active (LWD.A) starts out matching LW.A */
7109 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7110 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7111 /* per OPA spec, on link up LWD.E resets to LWD.S */
7112 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7113	/* cache the active egress rate (units [10^6 bits/sec]) */
7114 ppd->current_egress_rate = active_egress_rate(ppd);
7115}
7116
7117/*
7118 * Handle a verify capabilities interrupt from the 8051.
7119 *
7120 * This is a work-queue function outside of the interrupt.
7121 */
7122void handle_verify_cap(struct work_struct *work)
7123{
7124 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7125 link_vc_work);
7126 struct hfi1_devdata *dd = ppd->dd;
7127 u64 reg;
7128 u8 power_management;
7129	u8 continuous;
7130 u8 vcu;
7131 u8 vau;
7132 u8 z;
7133 u16 vl15buf;
7134 u16 link_widths;
7135 u16 crc_mask;
7136 u16 crc_val;
7137 u16 device_id;
7138 u16 active_tx, active_rx;
7139 u8 partner_supported_crc;
7140 u8 remote_tx_rate;
7141 u8 device_rev;
7142
7143 set_link_state(ppd, HLS_VERIFY_CAP);
7144
7145 lcb_shutdown(dd, 0);
7146 adjust_lcb_for_fpga_serdes(dd);
7147
7148 /*
7149 * These are now valid:
7150 * remote VerifyCap fields in the general LNI config
7151 * CSR DC8051_STS_REMOTE_GUID
7152 * CSR DC8051_STS_REMOTE_NODE_TYPE
7153 * CSR DC8051_STS_REMOTE_FM_SECURITY
7154 * CSR DC8051_STS_REMOTE_PORT_NO
7155 */
7156
7157	read_vc_remote_phy(dd, &power_management, &continuous);
7158 read_vc_remote_fabric(
7159 dd,
7160 &vau,
7161 &z,
7162 &vcu,
7163 &vl15buf,
7164 &partner_supported_crc);
7165 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7166 read_remote_device_id(dd, &device_id, &device_rev);
7167 /*
7168 * And the 'MgmtAllowed' information, which is exchanged during
7169	 * LNI, is also available at this point.
7170 */
7171 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7172 /* print the active widths */
7173 get_link_widths(dd, &active_tx, &active_rx);
7174 dd_dev_info(dd,
7175 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7176		    (int)power_management, (int)continuous);
7177 dd_dev_info(dd,
7178 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7179 (int)vau,
7180 (int)z,
7181 (int)vcu,
7182 (int)vl15buf,
7183 (int)partner_supported_crc);
7184 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7185 (u32)remote_tx_rate, (u32)link_widths);
7186 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7187 (u32)device_id, (u32)device_rev);
7188 /*
7189 * The peer vAU value just read is the peer receiver value. HFI does
7190 * not support a transmit vAU of 0 (AU == 8). We advertised that
7191 * with Z=1 in the fabric capabilities sent to the peer. The peer
7192 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7193 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7194 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7195 * subject to the Z value exception.
7196 */
7197 if (vau == 0)
7198 vau = 1;
7199 set_up_vl15(dd, vau, vl15buf);
7200
7201 /* set up the LCB CRC mode */
7202 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7203
7204 /* order is important: use the lowest bit in common */
7205 if (crc_mask & CAP_CRC_14B)
7206 crc_val = LCB_CRC_14B;
7207 else if (crc_mask & CAP_CRC_48B)
7208 crc_val = LCB_CRC_48B;
7209 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7210 crc_val = LCB_CRC_12B_16B_PER_LANE;
7211 else
7212 crc_val = LCB_CRC_16B;
7213
7214 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7215 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7216 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7217
7218 /* set (14b only) or clear sideband credit */
7219 reg = read_csr(dd, SEND_CM_CTRL);
7220 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7221 write_csr(dd, SEND_CM_CTRL,
7222 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7223 } else {
7224 write_csr(dd, SEND_CM_CTRL,
7225 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7226 }
7227
7228 ppd->link_speed_active = 0; /* invalid value */
7229 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7230 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7231 switch (remote_tx_rate) {
7232 case 0:
7233 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7234 break;
7235 case 1:
7236 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7237 break;
7238 }
7239 } else {
7240 /* actual rate is highest bit of the ANDed rates */
7241 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7242
7243 if (rate & 2)
7244 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7245 else if (rate & 1)
7246 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7247 }
7248 if (ppd->link_speed_active == 0) {
7249 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7250 __func__, (int)remote_tx_rate);
7251 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7252 }
7253
7254 /*
7255 * Cache the values of the supported, enabled, and active
7256 * LTP CRC modes to return in 'portinfo' queries. But the bit
7257 * flags that are returned in the portinfo query differ from
7258 * what's in the link_crc_mask, crc_sizes, and crc_val
7259 * variables. Convert these here.
7260 */
7261 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7262 /* supported crc modes */
7263 ppd->port_ltp_crc_mode |=
7264 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7265 /* enabled crc modes */
7266 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7267 /* active crc mode */
7268
7269 /* set up the remote credit return table */
7270 assign_remote_cm_au_table(dd, vcu);
7271
7272 /*
7273 * The LCB is reset on entry to handle_verify_cap(), so this must
7274 * be applied on every link up.
7275 *
7276 * Adjust LCB error kill enable to kill the link if
7277 * these RBUF errors are seen:
7278 * REPLAY_BUF_MBE_SMASK
7279 * FLIT_INPUT_BUF_MBE_SMASK
7280 */
7281	if (is_ax(dd)) { /* fixed in B0 */
7282		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7283 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7284 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7285 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7286 }
7287
7288 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7289 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7290
7291 /* give 8051 access to the LCB CSRs */
7292 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7293 set_8051_lcb_access(dd);
7294
7295 ppd->neighbor_guid =
7296 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7297 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7298 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7299 ppd->neighbor_type =
7300 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7301 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7302 ppd->neighbor_fm_security =
7303 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7304 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7305 dd_dev_info(dd,
7306 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7307 ppd->neighbor_guid, ppd->neighbor_type,
7308 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7309 if (ppd->mgmt_allowed)
7310 add_full_mgmt_pkey(ppd);
7311
7312 /* tell the 8051 to go to LinkUp */
7313 set_link_state(ppd, HLS_GOING_UP);
7314}
7315
7316/*
7317 * Apply the link width downgrade enabled policy against the current active
7318 * link widths.
7319 *
7320 * Called when the enabled policy changes or the active link widths change.
7321 */
7322void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7323{
7324	int do_bounce = 0;
7325	int tries;
7326	u16 lwde;
7327	u16 tx, rx;
7328
7329	/* use the hls lock to avoid a race with actual link up */
7330	tries = 0;
7331retry:
7332	mutex_lock(&ppd->hls_lock);
7333	/* only apply if the link is up */
7334	if (!(ppd->host_link_state & HLS_UP)) {
7335 /* still going up..wait and retry */
7336 if (ppd->host_link_state & HLS_GOING_UP) {
7337 if (++tries < 1000) {
7338 mutex_unlock(&ppd->hls_lock);
7339 usleep_range(100, 120); /* arbitrary */
7340 goto retry;
7341 }
7342 dd_dev_err(ppd->dd,
7343 "%s: giving up waiting for link state change\n",
7344 __func__);
7345 }
7346 goto done;
7347 }
7348
7349 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007350
7351 if (refresh_widths) {
7352 get_link_widths(ppd->dd, &tx, &rx);
7353 ppd->link_width_downgrade_tx_active = tx;
7354 ppd->link_width_downgrade_rx_active = rx;
7355 }
7356
7357 if (lwde == 0) {
7358 /* downgrade is disabled */
7359
7360 /* bounce if not at starting active width */
7361		if ((ppd->link_width_active != ppd->link_width_downgrade_tx_active) ||
7362		    (ppd->link_width_active != ppd->link_width_downgrade_rx_active)) {
7363			dd_dev_err(ppd->dd,
7364 "Link downgrade is disabled and link has downgraded, downing link\n");
7365 dd_dev_err(ppd->dd,
7366 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7367 ppd->link_width_active,
7368 ppd->link_width_downgrade_tx_active,
7369 ppd->link_width_downgrade_rx_active);
7370 do_bounce = 1;
7371 }
7372	} else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7373		   (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7374		/* Tx or Rx is outside the enabled policy */
7375 dd_dev_err(ppd->dd,
7376 "Link is outside of downgrade allowed, downing link\n");
7377 dd_dev_err(ppd->dd,
7378 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7379 lwde,
7380 ppd->link_width_downgrade_tx_active,
7381 ppd->link_width_downgrade_rx_active);
7382 do_bounce = 1;
7383 }
7384
7385done:
7386 mutex_unlock(&ppd->hls_lock);
7387
7388	if (do_bounce) {
7389 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7390 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7391 set_link_state(ppd, HLS_DN_OFFLINE);
7392		tune_serdes(ppd);
7393		start_link(ppd);
7394 }
7395}
7396
7397/*
7398 * Handle a link downgrade interrupt from the 8051.
7399 *
7400 * This is a work-queue function outside of the interrupt.
7401 */
7402void handle_link_downgrade(struct work_struct *work)
7403{
7404 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7405 link_downgrade_work);
7406
7407 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7408 apply_link_downgrade_policy(ppd, 1);
7409}
7410
7411static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7412{
7413 return flag_string(buf, buf_len, flags, dcc_err_flags,
7414 ARRAY_SIZE(dcc_err_flags));
7415}
7416
7417static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7418{
7419 return flag_string(buf, buf_len, flags, lcb_err_flags,
7420 ARRAY_SIZE(lcb_err_flags));
7421}
7422
7423static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7424{
7425 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7426 ARRAY_SIZE(dc8051_err_flags));
7427}
7428
7429static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7430{
7431 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7432 ARRAY_SIZE(dc8051_info_err_flags));
7433}
7434
7435static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7436{
7437 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7438 ARRAY_SIZE(dc8051_info_host_msg_flags));
7439}
7440
7441static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7442{
7443 struct hfi1_pportdata *ppd = dd->pport;
7444 u64 info, err, host_msg;
7445 int queue_link_down = 0;
7446 char buf[96];
7447
7448 /* look at the flags */
7449 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7450 /* 8051 information set by firmware */
7451 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7452 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7453 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7454 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7455 host_msg = (info >>
7456 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7457 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7458
7459 /*
7460 * Handle error flags.
7461 */
7462 if (err & FAILED_LNI) {
7463 /*
7464 * LNI error indications are cleared by the 8051
7465 * only when starting polling. Only pay attention
7466 * to them when in the states that occur during
7467 * LNI.
7468 */
7469 if (ppd->host_link_state
7470 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7471 queue_link_down = 1;
7472 dd_dev_info(dd, "Link error: %s\n",
7473 dc8051_info_err_string(buf,
7474 sizeof(buf),
7475 err & FAILED_LNI));
7476 }
7477 err &= ~(u64)FAILED_LNI;
7478 }
7479		/* unknown frames can happen during LNI, just count */
7480 if (err & UNKNOWN_FRAME) {
7481 ppd->unknown_frame_count++;
7482 err &= ~(u64)UNKNOWN_FRAME;
7483 }
7484		if (err) {
7485 /* report remaining errors, but do not do anything */
7486 dd_dev_err(dd, "8051 info error: %s\n",
7487 dc8051_info_err_string(buf, sizeof(buf), err));
7488 }
7489
7490 /*
7491 * Handle host message flags.
7492 */
7493 if (host_msg & HOST_REQ_DONE) {
7494 /*
7495 * Presently, the driver does a busy wait for
7496 * host requests to complete. This is only an
7497 * informational message.
7498 * NOTE: The 8051 clears the host message
7499 * information *on the next 8051 command*.
7500 * Therefore, when linkup is achieved,
7501 * this flag will still be set.
7502 */
7503 host_msg &= ~(u64)HOST_REQ_DONE;
7504 }
7505 if (host_msg & BC_SMA_MSG) {
7506 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7507 host_msg &= ~(u64)BC_SMA_MSG;
7508 }
7509 if (host_msg & LINKUP_ACHIEVED) {
7510 dd_dev_info(dd, "8051: Link up\n");
7511 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7512 host_msg &= ~(u64)LINKUP_ACHIEVED;
7513 }
7514 if (host_msg & EXT_DEVICE_CFG_REQ) {
7515			queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
7516			host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7517 }
7518 if (host_msg & VERIFY_CAP_FRAME) {
7519 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7520 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7521 }
7522 if (host_msg & LINK_GOING_DOWN) {
7523 const char *extra = "";
7524 /* no downgrade action needed if going down */
7525 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7526 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7527 extra = " (ignoring downgrade)";
7528 }
7529 dd_dev_info(dd, "8051: Link down%s\n", extra);
7530 queue_link_down = 1;
7531 host_msg &= ~(u64)LINK_GOING_DOWN;
7532 }
7533 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7534 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7535 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7536 }
7537 if (host_msg) {
7538 /* report remaining messages, but do not do anything */
7539 dd_dev_info(dd, "8051 info host message: %s\n",
7540 dc8051_info_host_msg_string(buf, sizeof(buf),
7541 host_msg));
7542 }
7543
7544 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7545 }
7546 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7547 /*
7548 * Lost the 8051 heartbeat. If this happens, we
7549 * receive constant interrupts about it. Disable
7550 * the interrupt after the first.
7551 */
7552 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7553 write_csr(dd, DC_DC8051_ERR_EN,
7554 read_csr(dd, DC_DC8051_ERR_EN)
7555 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7556
7557 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7558 }
7559 if (reg) {
7560 /* report the error, but do not do anything */
7561 dd_dev_err(dd, "8051 error: %s\n",
7562 dc8051_err_string(buf, sizeof(buf), reg));
7563 }
7564
7565 if (queue_link_down) {
7566 /* if the link is already going down or disabled, do not
7567 * queue another */
7568		if ((ppd->host_link_state &
7569		    (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7570		    ppd->link_enabled == 0) {
7571			dd_dev_info(dd, "%s: not queuing link down\n",
7572 __func__);
7573 } else {
7574 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7575 }
7576 }
7577}
7578
7579static const char * const fm_config_txt[] = {
7580[0] =
7581 "BadHeadDist: Distance violation between two head flits",
7582[1] =
7583 "BadTailDist: Distance violation between two tail flits",
7584[2] =
7585 "BadCtrlDist: Distance violation between two credit control flits",
7586[3] =
7587 "BadCrdAck: Credits return for unsupported VL",
7588[4] =
7589 "UnsupportedVLMarker: Received VL Marker",
7590[5] =
7591 "BadPreempt: Exceeded the preemption nesting level",
7592[6] =
7593 "BadControlFlit: Received unsupported control flit",
7594/* no 7 */
7595[8] =
7596 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7597};
7598
7599static const char * const port_rcv_txt[] = {
7600[1] =
7601 "BadPktLen: Illegal PktLen",
7602[2] =
7603 "PktLenTooLong: Packet longer than PktLen",
7604[3] =
7605 "PktLenTooShort: Packet shorter than PktLen",
7606[4] =
7607 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7608[5] =
7609 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7610[6] =
7611 "BadL2: Illegal L2 opcode",
7612[7] =
7613 "BadSC: Unsupported SC",
7614[9] =
7615 "BadRC: Illegal RC",
7616[11] =
7617 "PreemptError: Preempting with same VL",
7618[12] =
7619 "PreemptVL15: Preempting a VL15 packet",
7620};
7621
7622#define OPA_LDR_FMCONFIG_OFFSET 16
7623#define OPA_LDR_PORTRCV_OFFSET 0
7624static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7625{
7626 u64 info, hdr0, hdr1;
7627 const char *extra;
7628 char buf[96];
7629 struct hfi1_pportdata *ppd = dd->pport;
7630 u8 lcl_reason = 0;
7631 int do_bounce = 0;
7632
7633 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7634 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7635 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7636 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7637 /* set status bit */
7638 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7639 }
7640 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7641 }
7642
7643 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7644 struct hfi1_pportdata *ppd = dd->pport;
7645 /* this counter saturates at (2^32) - 1 */
7646 if (ppd->link_downed < (u32)UINT_MAX)
7647 ppd->link_downed++;
7648 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7649 }
7650
7651 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7652 u8 reason_valid = 1;
7653
7654 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7655 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7656 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7657 /* set status bit */
7658 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7659 }
7660 switch (info) {
7661 case 0:
7662 case 1:
7663 case 2:
7664 case 3:
7665 case 4:
7666 case 5:
7667 case 6:
7668 extra = fm_config_txt[info];
7669 break;
7670 case 8:
7671 extra = fm_config_txt[info];
7672 if (ppd->port_error_action &
7673 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7674 do_bounce = 1;
7675 /*
7676 * lcl_reason cannot be derived from info
7677 * for this error
7678 */
7679 lcl_reason =
7680 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7681 }
7682 break;
7683 default:
7684 reason_valid = 0;
7685 snprintf(buf, sizeof(buf), "reserved%lld", info);
7686 extra = buf;
7687 break;
7688 }
7689
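		/* port_error_action holds a bounce-enable bit per fmconfig cause, starting at bit 16 */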
7690 if (reason_valid && !do_bounce) {
7691 do_bounce = ppd->port_error_action &
7692 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7693 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7694 }
7695
7696 /* just report this */
7697 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7698 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7699 }
7700
7701 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7702 u8 reason_valid = 1;
7703
7704 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7705 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7706 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7707 if (!(dd->err_info_rcvport.status_and_code &
7708 OPA_EI_STATUS_SMASK)) {
7709 dd->err_info_rcvport.status_and_code =
7710 info & OPA_EI_CODE_SMASK;
7711 /* set status bit */
7712 dd->err_info_rcvport.status_and_code |=
7713 OPA_EI_STATUS_SMASK;
7714 /* save first 2 flits in the packet that caused
7715 * the error */
7716 dd->err_info_rcvport.packet_flit1 = hdr0;
7717 dd->err_info_rcvport.packet_flit2 = hdr1;
7718 }
7719 switch (info) {
7720 case 1:
7721 case 2:
7722 case 3:
7723 case 4:
7724 case 5:
7725 case 6:
7726 case 7:
7727 case 9:
7728 case 11:
7729 case 12:
7730 extra = port_rcv_txt[info];
7731 break;
7732 default:
7733 reason_valid = 0;
7734 snprintf(buf, sizeof(buf), "reserved%lld", info);
7735 extra = buf;
7736 break;
7737 }
7738
7739 if (reason_valid && !do_bounce) {
7740 do_bounce = ppd->port_error_action &
7741 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7742 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7743 }
7744
7745 /* just report this */
7746 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7747 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7748 hdr0, hdr1);
7749
7750 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7751 }
7752
7753 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7754 /* informative only */
7755 dd_dev_info(dd, "8051 access to LCB blocked\n");
7756 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7757 }
7758 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7759 /* informative only */
7760 dd_dev_info(dd, "host access to LCB blocked\n");
7761 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7762 }
7763
7764 /* report any remaining errors */
7765 if (reg)
7766 dd_dev_info(dd, "DCC Error: %s\n",
7767 dcc_err_string(buf, sizeof(buf), reg));
7768
7769 if (lcl_reason == 0)
7770 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7771
7772 if (do_bounce) {
7773 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7774 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7775 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7776 }
7777}
7778
7779static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7780{
7781 char buf[96];
7782
7783 dd_dev_info(dd, "LCB Error: %s\n",
7784 lcb_err_string(buf, sizeof(buf), reg));
7785}
7786
7787/*
7788 * CCE block DC interrupt. Source is < 8.
7789 */
7790static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7791{
7792 const struct err_reg_info *eri = &dc_errs[source];
7793
7794 if (eri->handler) {
7795 interrupt_clear_down(dd, 0, eri);
7796 } else if (source == 3 /* dc_lbm_int */) {
7797 /*
7798 * This indicates that a parity error has occurred on the
7799 * address/control lines presented to the LBM. The error
7800 * is a single pulse, there is no associated error flag,
7801 * and it is non-maskable. This is because if a parity
7802 * error occurs on the request the request is dropped.
7803 * This should never occur, but it is nice to know if it
7804 * ever does.
7805 */
7806 dd_dev_err(dd, "Parity error in DC LBM block\n");
7807 } else {
7808 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7809 }
7810}
7811
7812/*
7813 * TX block send credit interrupt. Source is < 160.
7814 */
7815static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7816{
7817 sc_group_release_update(dd, source);
7818}
7819
7820/*
7821 * TX block SDMA interrupt. Source is < 48.
7822 *
7823 * SDMA interrupts are grouped by type:
7824 *
7825 * 0 - N-1 = SDma
7826 * N - 2N-1 = SDmaProgress
7827 * 2N - 3N-1 = SDmaIdle
7828 */
7829static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7830{
7831 /* what interrupt */
7832 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7833 /* which engine */
7834 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7835
7836#ifdef CONFIG_SDMA_VERBOSITY
7837 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7838 slashstrip(__FILE__), __LINE__, __func__);
7839 sdma_dumpstate(&dd->per_sdma[which]);
7840#endif
7841
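	/* 'what' picks one of the three groups listed above; 'which' is the engine number */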
7842 if (likely(what < 3 && which < dd->num_sdma)) {
7843 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7844 } else {
7845 /* should not happen */
7846 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7847 }
7848}
7849
7850/*
7851 * RX block receive available interrupt. Source is < 160.
7852 */
7853static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7854{
7855 struct hfi1_ctxtdata *rcd;
7856 char *err_detail;
7857
7858 if (likely(source < dd->num_rcv_contexts)) {
7859 rcd = dd->rcd[source];
7860 if (rcd) {
7861 if (source < dd->first_user_ctxt)
7862				rcd->do_interrupt(rcd, 0);
7863			else
7864 handle_user_interrupt(rcd);
7865 return; /* OK */
7866 }
7867 /* received an interrupt, but no rcd */
7868 err_detail = "dataless";
7869 } else {
7870 /* received an interrupt, but are not using that context */
7871 err_detail = "out of range";
7872 }
7873 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7874 err_detail, source);
7875}
7876
7877/*
7878 * RX block receive urgent interrupt. Source is < 160.
7879 */
7880static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7881{
7882 struct hfi1_ctxtdata *rcd;
7883 char *err_detail;
7884
7885 if (likely(source < dd->num_rcv_contexts)) {
7886 rcd = dd->rcd[source];
7887 if (rcd) {
7888 /* only pay attention to user urgent interrupts */
7889 if (source >= dd->first_user_ctxt)
7890 handle_user_interrupt(rcd);
7891 return; /* OK */
7892 }
7893 /* received an interrupt, but no rcd */
7894 err_detail = "dataless";
7895 } else {
7896 /* received an interrupt, but are not using that context */
7897 err_detail = "out of range";
7898 }
7899 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7900 err_detail, source);
7901}
7902
7903/*
7904 * Reserved range interrupt. Should not be called in normal operation.
7905 */
7906static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7907{
7908 char name[64];
7909
7910 dd_dev_err(dd, "unexpected %s interrupt\n",
7911 is_reserved_name(name, sizeof(name), source));
7912}
7913
7914static const struct is_table is_table[] = {
7915/* start end
7916 name func interrupt func */
7917{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7918 is_misc_err_name, is_misc_err_int },
7919{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7920 is_sdma_eng_err_name, is_sdma_eng_err_int },
7921{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7922 is_sendctxt_err_name, is_sendctxt_err_int },
7923{ IS_SDMA_START, IS_SDMA_END,
7924 is_sdma_eng_name, is_sdma_eng_int },
7925{ IS_VARIOUS_START, IS_VARIOUS_END,
7926 is_various_name, is_various_int },
7927{ IS_DC_START, IS_DC_END,
7928 is_dc_name, is_dc_int },
7929{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7930 is_rcv_avail_name, is_rcv_avail_int },
7931{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7932 is_rcv_urgent_name, is_rcv_urgent_int },
7933{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7934 is_send_credit_name, is_send_credit_int},
7935{ IS_RESERVED_START, IS_RESERVED_END,
7936 is_reserved_name, is_reserved_int},
7937};
7938
7939/*
7940 * Interrupt source interrupt - called when the given source has an interrupt.
7941 * Source is a bit index into an array of 64-bit integers.
7942 */
7943static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7944{
7945 const struct is_table *entry;
7946
7947 /* avoids a double compare by walking the table in-order */
7948 for (entry = &is_table[0]; entry->is_name; entry++) {
7949 if (source < entry->end) {
7950 trace_hfi1_interrupt(dd, entry, source);
7951 entry->is_int(dd, source - entry->start);
7952 return;
7953 }
7954 }
7955 /* fell off the end */
7956 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7957}
7958
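/*
 * Worked example (illustrative, not part of the original source):
 * general_interrupt() below treats the CCE interrupt status CSRs as one
 * long bit vector, so source 70 lives in CCE_INT_STATUS word
 * 70 / 64 = 1, bit 70 % 64 = 6.  is_interrupt() above then dispatches
 * it to the is_table entry whose [start, end) range contains 70 and
 * passes 70 - entry->start to that handler.
 */
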
7959/*
7960 * General interrupt handler. This is able to correctly handle
7961 * all interrupts in case INTx is used.
7962 */
7963static irqreturn_t general_interrupt(int irq, void *data)
7964{
7965 struct hfi1_devdata *dd = data;
7966 u64 regs[CCE_NUM_INT_CSRS];
7967 u32 bit;
7968 int i;
7969
7970 this_cpu_inc(*dd->int_counter);
7971
7972 /* phase 1: scan and clear all handled interrupts */
7973 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7974 if (dd->gi_mask[i] == 0) {
7975 regs[i] = 0; /* used later */
7976 continue;
7977 }
7978 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7979 dd->gi_mask[i];
7980 /* only clear if anything is set */
7981 if (regs[i])
7982 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7983 }
7984
7985 /* phase 2: call the appropriate handler */
7986 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John8638b772016-02-14 20:19:24 -08007987 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007988 is_interrupt(dd, bit);
7989 }
7990
7991 return IRQ_HANDLED;
7992}
7993
7994static irqreturn_t sdma_interrupt(int irq, void *data)
7995{
7996 struct sdma_engine *sde = data;
7997 struct hfi1_devdata *dd = sde->dd;
7998 u64 status;
7999
8000#ifdef CONFIG_SDMA_VERBOSITY
8001 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8002 slashstrip(__FILE__), __LINE__, __func__);
8003 sdma_dumpstate(sde);
8004#endif
8005
8006 this_cpu_inc(*dd->int_counter);
8007
8008 /* This read_csr is really bad in the hot path */
8009 status = read_csr(dd,
Jubin John8638b772016-02-14 20:19:24 -08008010 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008011 & sde->imask;
8012 if (likely(status)) {
8013 /* clear the interrupt(s) */
8014 write_csr(dd,
Jubin John8638b772016-02-14 20:19:24 -08008015 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
Mike Marciniszyn77241052015-07-30 15:17:43 -04008016 status);
8017
8018 /* handle the interrupt(s) */
8019 sdma_engine_interrupt(sde, status);
8020 } else
8021 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8022 sde->this_idx);
8023
8024 return IRQ_HANDLED;
8025}
8026
8027/*
Dean Luickecd42f82016-02-03 14:35:14 -08008028 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8029	 * to ensure that the write completed. This does NOT guarantee that
8030 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008031 */
8032static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8033{
8034 struct hfi1_devdata *dd = rcd->dd;
8035 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8036
8037 mmiowb(); /* make sure everything before is written */
8038 write_csr(dd, addr, rcd->imask);
8039 /* force the above write on the chip and get a value back */
8040 (void)read_csr(dd, addr);
8041}
8042
8043/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008044void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008045{
8046 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8047}
8048
Dean Luickecd42f82016-02-03 14:35:14 -08008049/*
8050 * Return non-zero if a packet is present.
8051 *
8052 * This routine is called when rechecking for packets after the RcvAvail
8053 * interrupt has been cleared down. First, do a quick check of memory for
8054 * a packet present. If not found, use an expensive CSR read of the context
8055 * tail to determine the actual tail. The CSR read is necessary because there
8056 * is no method to push pending DMAs to memory other than an interrupt and we
8057 * are trying to determine if we need to force an interrupt.
8058 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008059static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8060{
Dean Luickecd42f82016-02-03 14:35:14 -08008061 u32 tail;
8062 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008063
Dean Luickecd42f82016-02-03 14:35:14 -08008064 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8065 present = (rcd->seq_cnt ==
8066 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8067 else /* is RDMA rtail */
8068 present = (rcd->head != get_rcvhdrtail(rcd));
8069
8070 if (present)
8071 return 1;
8072
8073	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8074 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8075 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008076}
8077
8078/*
8079 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8080 * This routine will try to handle packets immediately (latency), but if
8081	 * it finds too many, it will invoke the thread handler (bandwidth). The
8082	 * chip receive interrupt is *not* cleared down until this or the thread (if
8083 * invoked) is finished. The intent is to avoid extra interrupts while we
8084 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008085 */
8086static irqreturn_t receive_context_interrupt(int irq, void *data)
8087{
8088 struct hfi1_ctxtdata *rcd = data;
8089 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008090 int disposition;
8091 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008092
8093 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8094 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008095 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008096
Dean Luickf4f30031c2015-10-26 10:28:44 -04008097 /* receive interrupt remains blocked while processing packets */
8098 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008099
Dean Luickf4f30031c2015-10-26 10:28:44 -04008100 /*
8101 * Too many packets were seen while processing packets in this
8102 * IRQ handler. Invoke the handler thread. The receive interrupt
8103 * remains blocked.
8104 */
8105 if (disposition == RCV_PKT_LIMIT)
8106 return IRQ_WAKE_THREAD;
8107
8108 /*
8109 * The packet processor detected no more packets. Clear the receive
8110	 * interrupt and recheck for a packet that may have arrived
8111 * after the previous check and interrupt clear. If a packet arrived,
8112 * force another interrupt.
8113 */
8114 clear_recv_intr(rcd);
8115 present = check_packet_present(rcd);
8116 if (present)
8117 force_recv_intr(rcd);
8118
8119 return IRQ_HANDLED;
8120}
8121
8122/*
8123 * Receive packet thread handler. This expects to be invoked with the
8124 * receive interrupt still blocked.
8125 */
8126static irqreturn_t receive_context_thread(int irq, void *data)
8127{
8128 struct hfi1_ctxtdata *rcd = data;
8129 int present;
8130
8131 /* receive interrupt is still blocked from the IRQ handler */
8132 (void)rcd->do_interrupt(rcd, 1);
8133
8134 /*
8135 * The packet processor will only return if it detected no more
8136 * packets. Hold IRQs here so we can safely clear the interrupt and
8137 * recheck for a packet that may have arrived after the previous
8138 * check and the interrupt clear. If a packet arrived, force another
8139 * interrupt.
8140 */
8141 local_irq_disable();
8142 clear_recv_intr(rcd);
8143 present = check_packet_present(rcd);
8144 if (present)
8145 force_recv_intr(rcd);
8146 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008147
8148 return IRQ_HANDLED;
8149}
8150
8151/* ========================================================================= */
8152
8153u32 read_physical_state(struct hfi1_devdata *dd)
8154{
8155 u64 reg;
8156
8157 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8158 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8159 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8160}
8161
Jim Snowfb9036d2016-01-11 18:32:21 -05008162u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008163{
8164 u64 reg;
8165
8166 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8167 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8168 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8169}
8170
8171static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8172{
8173 u64 reg;
8174
8175 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8176 /* clear current state, set new state */
8177 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8178 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8179 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8180}
8181
8182/*
8183	 * Use the 8051 to read an LCB CSR.
8184 */
8185static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8186{
8187 u32 regno;
8188 int ret;
8189
8190 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8191 if (acquire_lcb_access(dd, 0) == 0) {
8192 *data = read_csr(dd, addr);
8193 release_lcb_access(dd, 0);
8194 return 0;
8195 }
8196 return -EBUSY;
8197 }
8198
8199 /* register is an index of LCB registers: (offset - base) / 8 */
8200 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8201 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8202 if (ret != HCMD_SUCCESS)
8203 return -EBUSY;
8204 return 0;
8205}
8206
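/*
 * Illustrative note (not part of the original source): the 8051 indexes
 * LCB registers as (offset - DC_LCB_CFG_RUN) / 8, so an LCB CSR located
 * 0x18 bytes above DC_LCB_CFG_RUN is passed to the HCMD_READ_LCB_CSR
 * command as register index 3.
 */
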
8207/*
8208 * Read an LCB CSR. Access may not be in host control, so check.
8209 * Return 0 on success, -EBUSY on failure.
8210 */
8211int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8212{
8213 struct hfi1_pportdata *ppd = dd->pport;
8214
8215 /* if up, go through the 8051 for the value */
8216 if (ppd->host_link_state & HLS_UP)
8217 return read_lcb_via_8051(dd, addr, data);
8218 /* if going up or down, no access */
8219 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8220 return -EBUSY;
8221 /* otherwise, host has access */
8222 *data = read_csr(dd, addr);
8223 return 0;
8224}
8225
8226/*
8227	 * Use the 8051 to write an LCB CSR.
8228 */
8229static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8230{
Dean Luick3bf40d62015-11-06 20:07:04 -05008231 u32 regno;
8232 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008233
Dean Luick3bf40d62015-11-06 20:07:04 -05008234 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8235 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8236 if (acquire_lcb_access(dd, 0) == 0) {
8237 write_csr(dd, addr, data);
8238 release_lcb_access(dd, 0);
8239 return 0;
8240 }
8241 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008242 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008243
8244 /* register is an index of LCB registers: (offset - base) / 8 */
8245 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8246 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8247 if (ret != HCMD_SUCCESS)
8248 return -EBUSY;
8249 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008250}
8251
8252/*
8253 * Write an LCB CSR. Access may not be in host control, so check.
8254 * Return 0 on success, -EBUSY on failure.
8255 */
8256int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8257{
8258 struct hfi1_pportdata *ppd = dd->pport;
8259
8260 /* if up, go through the 8051 for the value */
8261 if (ppd->host_link_state & HLS_UP)
8262 return write_lcb_via_8051(dd, addr, data);
8263 /* if going up or down, no access */
8264 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8265 return -EBUSY;
8266 /* otherwise, host has access */
8267 write_csr(dd, addr, data);
8268 return 0;
8269}
8270
8271/*
8272 * Returns:
8273 * < 0 = Linux error, not able to get access
8274 * > 0 = 8051 command RETURN_CODE
8275 */
8276static int do_8051_command(
8277 struct hfi1_devdata *dd,
8278 u32 type,
8279 u64 in_data,
8280 u64 *out_data)
8281{
8282 u64 reg, completed;
8283 int return_code;
8284 unsigned long flags;
8285 unsigned long timeout;
8286
8287 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8288
8289 /*
8290 * Alternative to holding the lock for a long time:
8291 * - keep busy wait - have other users bounce off
8292 */
8293 spin_lock_irqsave(&dd->dc8051_lock, flags);
8294
8295 /* We can't send any commands to the 8051 if it's in reset */
8296 if (dd->dc_shutdown) {
8297 return_code = -ENODEV;
8298 goto fail;
8299 }
8300
8301 /*
8302 * If an 8051 host command timed out previously, then the 8051 is
8303 * stuck.
8304 *
8305 * On first timeout, attempt to reset and restart the entire DC
8306 * block (including 8051). (Is this too big of a hammer?)
8307 *
8308 * If the 8051 times out a second time, the reset did not bring it
8309 * back to healthy life. In that case, fail any subsequent commands.
8310 */
8311 if (dd->dc8051_timed_out) {
8312 if (dd->dc8051_timed_out > 1) {
8313 dd_dev_err(dd,
8314 "Previous 8051 host command timed out, skipping command %u\n",
8315 type);
8316 return_code = -ENXIO;
8317 goto fail;
8318 }
8319 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8320 dc_shutdown(dd);
8321 dc_start(dd);
8322 spin_lock_irqsave(&dd->dc8051_lock, flags);
8323 }
8324
8325 /*
8326 * If there is no timeout, then the 8051 command interface is
8327 * waiting for a command.
8328 */
8329
8330 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008331	 * When writing an LCB CSR, out_data contains the full value
8332	 * to be written, while in_data contains the relative LCB
8333	 * address in 7:0. Do the work here, rather than the caller,
8334	 * of distributing the write data to where it needs to go:
8335 *
8336 * Write data
8337 * 39:00 -> in_data[47:8]
8338 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8339 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8340 */
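	/*
	 * Worked example (illustrative only): writing the 64-bit value
	 * 0x0123456789abcdef places 0x6789abcdef (bits 39:0) into
	 * in_data[47:8], 0x45 (bits 47:40) into the RETURN_CODE field,
	 * and 0x0123 (bits 63:48) into the RSP_DATA field below.
	 */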
8341 if (type == HCMD_WRITE_LCB_CSR) {
8342 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8343 reg = ((((*out_data) >> 40) & 0xff) <<
8344 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8345 | ((((*out_data) >> 48) & 0xffff) <<
8346 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8347 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8348 }
8349
8350 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008351 * Do two writes: the first to stabilize the type and req_data, the
8352 * second to activate.
8353 */
8354 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8355 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8356 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8357 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8358 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8359 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8360 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8361
8362 /* wait for completion, alternate: interrupt */
8363 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8364 while (1) {
8365 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8366 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8367 if (completed)
8368 break;
8369 if (time_after(jiffies, timeout)) {
8370 dd->dc8051_timed_out++;
8371 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8372 if (out_data)
8373 *out_data = 0;
8374 return_code = -ETIMEDOUT;
8375 goto fail;
8376 }
8377 udelay(2);
8378 }
8379
8380 if (out_data) {
8381 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8382 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8383 if (type == HCMD_READ_LCB_CSR) {
8384 /* top 16 bits are in a different register */
8385 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8386 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8387 << (48
8388 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8389 }
8390 }
8391 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8392 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8393 dd->dc8051_timed_out = 0;
8394 /*
8395 * Clear command for next user.
8396 */
8397 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8398
8399fail:
8400 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8401
8402 return return_code;
8403}
8404
8405static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8406{
8407 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8408}
8409
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008410int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8411 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008412{
8413 u64 data;
8414 int ret;
8415
8416 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8417 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8418 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8419 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8420 if (ret != HCMD_SUCCESS) {
8421 dd_dev_err(dd,
8422 "load 8051 config: field id %d, lane %d, err %d\n",
8423 (int)field_id, (int)lane_id, ret);
8424 }
8425 return ret;
8426}
8427
8428/*
8429 * Read the 8051 firmware "registers". Use the RAM directly. Always
8430 * set the result, even on error.
8431 * Return 0 on success, -errno on failure
8432 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008433int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8434 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008435{
8436 u64 big_data;
8437 u32 addr;
8438 int ret;
8439
8440 /* address start depends on the lane_id */
8441 if (lane_id < 4)
8442 addr = (4 * NUM_GENERAL_FIELDS)
8443 + (lane_id * 4 * NUM_LANE_FIELDS);
8444 else
8445 addr = 0;
8446 addr += field_id * 4;
8447
8448 /* read is in 8-byte chunks, hardware will truncate the address down */
8449 ret = read_8051_data(dd, addr, 8, &big_data);
8450
8451 if (ret == 0) {
8452 /* extract the 4 bytes we want */
8453 if (addr & 0x4)
8454 *result = (u32)(big_data >> 32);
8455 else
8456 *result = (u32)big_data;
8457 } else {
8458 *result = 0;
8459 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8460 __func__, lane_id, field_id);
8461 }
8462
8463 return ret;
8464}
8465
8466static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8467 u8 continuous)
8468{
8469 u32 frame;
8470
8471 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8472 | power_management << POWER_MANAGEMENT_SHIFT;
8473 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8474 GENERAL_CONFIG, frame);
8475}
8476
8477static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8478 u16 vl15buf, u8 crc_sizes)
8479{
8480 u32 frame;
8481
8482 frame = (u32)vau << VAU_SHIFT
8483 | (u32)z << Z_SHIFT
8484 | (u32)vcu << VCU_SHIFT
8485 | (u32)vl15buf << VL15BUF_SHIFT
8486 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8487 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8488 GENERAL_CONFIG, frame);
8489}
8490
8491static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8492 u8 *flag_bits, u16 *link_widths)
8493{
8494 u32 frame;
8495
8496 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8497 &frame);
8498 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8499 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8500 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8501}
8502
8503static int write_vc_local_link_width(struct hfi1_devdata *dd,
8504 u8 misc_bits,
8505 u8 flag_bits,
8506 u16 link_widths)
8507{
8508 u32 frame;
8509
8510 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8511 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8512 | (u32)link_widths << LINK_WIDTH_SHIFT;
8513 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8514 frame);
8515}
8516
8517static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8518 u8 device_rev)
8519{
8520 u32 frame;
8521
8522 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8523 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8524 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8525}
8526
8527static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8528 u8 *device_rev)
8529{
8530 u32 frame;
8531
8532 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8533 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8534 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8535 & REMOTE_DEVICE_REV_MASK;
8536}
8537
8538void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8539{
8540 u32 frame;
8541
8542 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8543 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8544 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8545}
8546
8547static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8548 u8 *continuous)
8549{
8550 u32 frame;
8551
8552 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8553 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8554 & POWER_MANAGEMENT_MASK;
8555 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8556 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8557}
8558
8559static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8560 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8561{
8562 u32 frame;
8563
8564 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8565 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8566 *z = (frame >> Z_SHIFT) & Z_MASK;
8567 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8568 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8569 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8570}
8571
8572static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8573 u8 *remote_tx_rate,
8574 u16 *link_widths)
8575{
8576 u32 frame;
8577
8578 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8579 &frame);
8580 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8581 & REMOTE_TX_RATE_MASK;
8582 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8583}
8584
8585static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8586{
8587 u32 frame;
8588
8589 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8590 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8591}
8592
8593static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8594{
8595 u32 frame;
8596
8597 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8598 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8599}
8600
8601static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8602{
8603 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8604}
8605
8606static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8607{
8608 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8609}
8610
8611void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8612{
8613 u32 frame;
8614 int ret;
8615
8616 *link_quality = 0;
8617 if (dd->pport->host_link_state & HLS_UP) {
8618 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8619 &frame);
8620 if (ret == 0)
8621 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8622 & LINK_QUALITY_MASK;
8623 }
8624}
8625
8626static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8627{
8628 u32 frame;
8629
8630 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8631 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8632}
8633
8634static int read_tx_settings(struct hfi1_devdata *dd,
8635 u8 *enable_lane_tx,
8636 u8 *tx_polarity_inversion,
8637 u8 *rx_polarity_inversion,
8638 u8 *max_rate)
8639{
8640 u32 frame;
8641 int ret;
8642
8643 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8644 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8645 & ENABLE_LANE_TX_MASK;
8646 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8647 & TX_POLARITY_INVERSION_MASK;
8648 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8649 & RX_POLARITY_INVERSION_MASK;
8650 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8651 return ret;
8652}
8653
8654static int write_tx_settings(struct hfi1_devdata *dd,
8655 u8 enable_lane_tx,
8656 u8 tx_polarity_inversion,
8657 u8 rx_polarity_inversion,
8658 u8 max_rate)
8659{
8660 u32 frame;
8661
8662 /* no need to mask, all variable sizes match field widths */
8663 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8664 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8665 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8666 | max_rate << MAX_RATE_SHIFT;
8667 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8668}
8669
8670static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8671{
8672 u32 frame, version, prod_id;
8673 int ret, lane;
8674
8675 /* 4 lanes */
8676 for (lane = 0; lane < 4; lane++) {
8677 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8678 if (ret) {
8679 dd_dev_err(
8680 dd,
8681 "Unable to read lane %d firmware details\n",
8682 lane);
8683 continue;
8684 }
8685 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8686 & SPICO_ROM_VERSION_MASK;
8687 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8688 & SPICO_ROM_PROD_ID_MASK;
8689 dd_dev_info(dd,
8690 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8691 lane, version, prod_id);
8692 }
8693}
8694
8695/*
8696 * Read an idle LCB message.
8697 *
8698 * Returns 0 on success, -EINVAL on error
8699 */
8700static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8701{
8702 int ret;
8703
8704 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8705 type, data_out);
8706 if (ret != HCMD_SUCCESS) {
8707 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8708 (u32)type, ret);
8709 return -EINVAL;
8710 }
8711 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8712 /* return only the payload as we already know the type */
8713 *data_out >>= IDLE_PAYLOAD_SHIFT;
8714 return 0;
8715}
8716
8717/*
8718 * Read an idle SMA message. To be done in response to a notification from
8719 * the 8051.
8720 *
8721 * Returns 0 on success, -EINVAL on error
8722 */
8723static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8724{
8725 return read_idle_message(dd,
8726 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8727}
8728
8729/*
8730 * Send an idle LCB message.
8731 *
8732 * Returns 0 on success, -EINVAL on error
8733 */
8734static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8735{
8736 int ret;
8737
8738 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8739 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8740 if (ret != HCMD_SUCCESS) {
8741 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8742 data, ret);
8743 return -EINVAL;
8744 }
8745 return 0;
8746}
8747
8748/*
8749 * Send an idle SMA message.
8750 *
8751 * Returns 0 on success, -EINVAL on error
8752 */
8753int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8754{
8755 u64 data;
8756
8757 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8758 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8759 return send_idle_message(dd, data);
8760}
8761
8762/*
8763 * Initialize the LCB then do a quick link up. This may or may not be
8764 * in loopback.
8765 *
8766 * return 0 on success, -errno on error
8767 */
8768static int do_quick_linkup(struct hfi1_devdata *dd)
8769{
8770 u64 reg;
8771 unsigned long timeout;
8772 int ret;
8773
8774 lcb_shutdown(dd, 0);
8775
8776 if (loopback) {
8777 /* LCB_CFG_LOOPBACK.VAL = 2 */
8778 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8779 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8780 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8781 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8782 }
8783
8784 /* start the LCBs */
8785 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8786 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8787
8788 /* simulator only loopback steps */
8789 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8790 /* LCB_CFG_RUN.EN = 1 */
8791 write_csr(dd, DC_LCB_CFG_RUN,
8792 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8793
8794 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8795 timeout = jiffies + msecs_to_jiffies(10);
8796 while (1) {
8797 reg = read_csr(dd,
8798 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8799 if (reg)
8800 break;
8801 if (time_after(jiffies, timeout)) {
8802 dd_dev_err(dd,
8803 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8804 return -ETIMEDOUT;
8805 }
8806 udelay(2);
8807 }
8808
8809 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8810 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8811 }
8812
8813 if (!loopback) {
8814 /*
8815 * When doing quick linkup and not in loopback, both
8816 * sides must be done with LCB set-up before either
8817 * starts the quick linkup. Put a delay here so that
8818 * both sides can be started and have a chance to be
8819 * done with LCB set up before resuming.
8820 */
8821 dd_dev_err(dd,
8822 "Pausing for peer to be finished with LCB set up\n");
8823 msleep(5000);
8824 dd_dev_err(dd,
8825 "Continuing with quick linkup\n");
8826 }
8827
8828 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8829 set_8051_lcb_access(dd);
8830
8831 /*
8832 * State "quick" LinkUp request sets the physical link state to
8833 * LinkUp without a verify capability sequence.
8834 * This state is in simulator v37 and later.
8835 */
8836 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8837 if (ret != HCMD_SUCCESS) {
8838 dd_dev_err(dd,
8839 "%s: set physical link state to quick LinkUp failed with return %d\n",
8840 __func__, ret);
8841
8842 set_host_lcb_access(dd);
8843 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8844
8845 if (ret >= 0)
8846 ret = -EINVAL;
8847 return ret;
8848 }
8849
8850 return 0; /* success */
8851}
8852
8853/*
8854 * Set the SerDes to internal loopback mode.
8855 * Returns 0 on success, -errno on error.
8856 */
8857static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8858{
8859 int ret;
8860
8861 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8862 if (ret == HCMD_SUCCESS)
8863 return 0;
8864 dd_dev_err(dd,
8865 "Set physical link state to SerDes Loopback failed with return %d\n",
8866 ret);
8867 if (ret >= 0)
8868 ret = -EINVAL;
8869 return ret;
8870}
8871
8872/*
8873 * Do all special steps to set up loopback.
8874 */
8875static int init_loopback(struct hfi1_devdata *dd)
8876{
8877 dd_dev_info(dd, "Entering loopback mode\n");
8878
8879 /* all loopbacks should disable self GUID check */
8880 write_csr(dd, DC_DC8051_CFG_MODE,
8881 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8882
8883 /*
8884 * The simulator has only one loopback option - LCB. Switch
8885 * to that option, which includes quick link up.
8886 *
8887 * Accept all valid loopback values.
8888 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08008889 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8890 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8891 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008892 loopback = LOOPBACK_LCB;
8893 quick_linkup = 1;
8894 return 0;
8895 }
8896
8897 /* handle serdes loopback */
8898 if (loopback == LOOPBACK_SERDES) {
8899		/* internal serdes loopback needs quick linkup on RTL */
8900 if (dd->icode == ICODE_RTL_SILICON)
8901 quick_linkup = 1;
8902 return set_serdes_loopback_mode(dd);
8903 }
8904
8905 /* LCB loopback - handled at poll time */
8906 if (loopback == LOOPBACK_LCB) {
8907 quick_linkup = 1; /* LCB is always quick linkup */
8908
8909 /* not supported in emulation due to emulation RTL changes */
8910 if (dd->icode == ICODE_FPGA_EMULATION) {
8911 dd_dev_err(dd,
8912 "LCB loopback not supported in emulation\n");
8913 return -EINVAL;
8914 }
8915 return 0;
8916 }
8917
8918 /* external cable loopback requires no extra steps */
8919 if (loopback == LOOPBACK_CABLE)
8920 return 0;
8921
8922 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8923 return -EINVAL;
8924}
8925
8926/*
8927 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8928 * used in the Verify Capability link width attribute.
8929 */
8930static u16 opa_to_vc_link_widths(u16 opa_widths)
8931{
8932 int i;
8933 u16 result = 0;
8934
8935 static const struct link_bits {
8936 u16 from;
8937 u16 to;
8938 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08008939 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
8940 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
8941 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
8942 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04008943 };
8944
8945 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8946 if (opa_widths & opa_link_xlate[i].from)
8947 result |= opa_link_xlate[i].to;
8948 }
8949 return result;
8950}
8951
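/*
 * Usage example (illustrative, not part of the original source; the
 * variable name is hypothetical):
 *
 *	u16 vc_widths = opa_to_vc_link_widths(OPA_LINK_WIDTH_1X |
 *					      OPA_LINK_WIDTH_4X);
 *
 * yields 0x9: bit 0 for 1X and bit 3 for 4X.
 */
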
8952/*
8953 * Set link attributes before moving to polling.
8954 */
8955static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8956{
8957 struct hfi1_devdata *dd = ppd->dd;
8958 u8 enable_lane_tx;
8959 u8 tx_polarity_inversion;
8960 u8 rx_polarity_inversion;
8961 int ret;
8962
8963 /* reset our fabric serdes to clear any lingering problems */
8964 fabric_serdes_reset(dd);
8965
8966 /* set the local tx rate - need to read-modify-write */
8967 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8968 &rx_polarity_inversion, &ppd->local_tx_rate);
8969 if (ret)
8970 goto set_local_link_attributes_fail;
8971
8972 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8973 /* set the tx rate to the fastest enabled */
8974 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8975 ppd->local_tx_rate = 1;
8976 else
8977 ppd->local_tx_rate = 0;
8978 } else {
8979 /* set the tx rate to all enabled */
8980 ppd->local_tx_rate = 0;
8981 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8982 ppd->local_tx_rate |= 2;
8983 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8984 ppd->local_tx_rate |= 1;
8985 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008986
8987 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008988 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8989 rx_polarity_inversion, ppd->local_tx_rate);
8990 if (ret != HCMD_SUCCESS)
8991 goto set_local_link_attributes_fail;
8992
8993 /*
8994 * DC supports continuous updates.
8995 */
8996 ret = write_vc_local_phy(dd, 0 /* no power management */,
8997 1 /* continuous updates */);
8998 if (ret != HCMD_SUCCESS)
8999 goto set_local_link_attributes_fail;
9000
9001 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9002 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9003 ppd->port_crc_mode_enabled);
9004 if (ret != HCMD_SUCCESS)
9005 goto set_local_link_attributes_fail;
9006
9007 ret = write_vc_local_link_width(dd, 0, 0,
9008 opa_to_vc_link_widths(ppd->link_width_enabled));
9009 if (ret != HCMD_SUCCESS)
9010 goto set_local_link_attributes_fail;
9011
9012 /* let peer know who we are */
9013 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9014 if (ret == HCMD_SUCCESS)
9015 return 0;
9016
9017set_local_link_attributes_fail:
9018 dd_dev_err(dd,
9019 "Failed to set local link attributes, return 0x%x\n",
9020 ret);
9021 return ret;
9022}
9023
9024/*
9025 * Call this to start the link. Schedule a retry if the cable is not
9026 * present or if unable to start polling. Do not do anything if the
9027 * link is disabled. Returns 0 if link is disabled or moved to polling
9028 */
9029int start_link(struct hfi1_pportdata *ppd)
9030{
9031 if (!ppd->link_enabled) {
9032 dd_dev_info(ppd->dd,
9033 "%s: stopping link start because link is disabled\n",
9034 __func__);
9035 return 0;
9036 }
9037 if (!ppd->driver_link_ready) {
9038 dd_dev_info(ppd->dd,
9039 "%s: stopping link start because driver is not ready\n",
9040 __func__);
9041 return 0;
9042 }
9043
9044 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9045 loopback == LOOPBACK_LCB ||
9046 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9047 return set_link_state(ppd, HLS_DN_POLL);
9048
9049 dd_dev_info(ppd->dd,
9050 "%s: stopping link start because no cable is present\n",
9051 __func__);
9052 return -EAGAIN;
9053}
9054
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009055static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9056{
9057 struct hfi1_devdata *dd = ppd->dd;
9058 u64 mask;
9059 unsigned long timeout;
9060
9061 /*
9062 * Check for QSFP interrupt for t_init (SFF 8679)
9063 */
9064 timeout = jiffies + msecs_to_jiffies(2000);
9065 while (1) {
9066 mask = read_csr(dd, dd->hfi1_id ?
9067 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9068 if (!(mask & QSFP_HFI0_INT_N)) {
9069 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9070 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9071 break;
9072 }
9073 if (time_after(jiffies, timeout)) {
9074 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9075 __func__);
9076 break;
9077 }
9078 udelay(2);
9079 }
9080}
9081
9082static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9083{
9084 struct hfi1_devdata *dd = ppd->dd;
9085 u64 mask;
9086
9087 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9088 if (enable)
9089 mask |= (u64)QSFP_HFI0_INT_N;
9090 else
9091 mask &= ~(u64)QSFP_HFI0_INT_N;
9092 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9093}
9094
9095void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009096{
9097 struct hfi1_devdata *dd = ppd->dd;
9098 u64 mask, qsfp_mask;
9099
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009100 /* Disable INT_N from triggering QSFP interrupts */
9101 set_qsfp_int_n(ppd, 0);
9102
9103 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009104 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009105 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009106 qsfp_mask |= mask;
9107 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009108 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009109
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009110 qsfp_mask = read_csr(dd, dd->hfi1_id ?
9111 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009112 qsfp_mask &= ~mask;
9113 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009114 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009115
9116 udelay(10);
9117
9118 qsfp_mask |= mask;
9119 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009120 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9121
9122 wait_for_qsfp_init(ppd);
9123
9124 /*
9125 * Allow INT_N to trigger the QSFP interrupt to watch
9126 * for alarms and warnings
9127 */
9128 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009129}
9130
9131static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9132 u8 *qsfp_interrupt_status)
9133{
9134 struct hfi1_devdata *dd = ppd->dd;
9135
9136 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9137 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9138 dd_dev_info(dd,
9139 "%s: QSFP cable on fire\n",
9140 __func__);
9141
9142 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9143 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9144 dd_dev_info(dd,
9145 "%s: QSFP cable temperature too low\n",
9146 __func__);
9147
9148 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9149 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9150 dd_dev_info(dd,
9151 "%s: QSFP supply voltage too high\n",
9152 __func__);
9153
9154 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9155 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9156 dd_dev_info(dd,
9157 "%s: QSFP supply voltage too low\n",
9158 __func__);
9159
9160 /* Byte 2 is vendor specific */
9161
9162 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9163 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9164 dd_dev_info(dd,
9165 "%s: Cable RX channel 1/2 power too high\n",
9166 __func__);
9167
9168 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9169 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9170 dd_dev_info(dd,
9171 "%s: Cable RX channel 1/2 power too low\n",
9172 __func__);
9173
9174 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9175 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9176 dd_dev_info(dd,
9177 "%s: Cable RX channel 3/4 power too high\n",
9178 __func__);
9179
9180 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9181 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9182 dd_dev_info(dd,
9183 "%s: Cable RX channel 3/4 power too low\n",
9184 __func__);
9185
9186 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9187 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9188 dd_dev_info(dd,
9189 "%s: Cable TX channel 1/2 bias too high\n",
9190 __func__);
9191
9192 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9193 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9194 dd_dev_info(dd,
9195 "%s: Cable TX channel 1/2 bias too low\n",
9196 __func__);
9197
9198 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9199 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9200 dd_dev_info(dd,
9201 "%s: Cable TX channel 3/4 bias too high\n",
9202 __func__);
9203
9204 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9205 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9206 dd_dev_info(dd,
9207 "%s: Cable TX channel 3/4 bias too low\n",
9208 __func__);
9209
9210 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9211 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9212 dd_dev_info(dd,
9213 "%s: Cable TX channel 1/2 power too high\n",
9214 __func__);
9215
9216 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9217 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9218 dd_dev_info(dd,
9219 "%s: Cable TX channel 1/2 power too low\n",
9220 __func__);
9221
9222 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9223 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9224 dd_dev_info(dd,
9225 "%s: Cable TX channel 3/4 power too high\n",
9226 __func__);
9227
9228 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9229 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9230 dd_dev_info(dd,
9231 "%s: Cable TX channel 3/4 power too low\n",
9232 __func__);
9233
9234 /* Bytes 9-10 and 11-12 are reserved */
9235 /* Bytes 13-15 are vendor specific */
9236
9237 return 0;
9238}
9239
Mike Marciniszyn77241052015-07-30 15:17:43 -04009240/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009241void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009242{
9243 struct qsfp_data *qd;
9244 struct hfi1_pportdata *ppd;
9245 struct hfi1_devdata *dd;
9246
9247 qd = container_of(work, struct qsfp_data, qsfp_work);
9248 ppd = qd->ppd;
9249 dd = ppd->dd;
9250
9251 /* Sanity check */
9252 if (!qsfp_mod_present(ppd))
9253 return;
9254
9255 /*
9256	 * Turn DC back on after the cable has been
9257 * re-inserted. Up until now, the DC has been in
9258 * reset to save power.
9259 */
9260 dc_start(dd);
9261
9262 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009263 set_qsfp_int_n(ppd, 0);
9264
9265 wait_for_qsfp_init(ppd);
9266
9267 /*
9268 * Allow INT_N to trigger the QSFP interrupt to watch
9269 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009270 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009271 set_qsfp_int_n(ppd, 1);
9272
9273 tune_serdes(ppd);
9274
9275 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009276 }
9277
9278 if (qd->check_interrupt_flags) {
9279 u8 qsfp_interrupt_status[16] = {0,};
9280
9281 if (qsfp_read(ppd, dd->hfi1_id, 6,
9282 &qsfp_interrupt_status[0], 16) != 16) {
9283 dd_dev_info(dd,
9284 "%s: Failed to read status of QSFP module\n",
9285 __func__);
9286 } else {
9287 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009288
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009289 handle_qsfp_error_conditions(
9290 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009291 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9292 ppd->qsfp_info.check_interrupt_flags = 0;
9293 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9294 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009295 }
9296 }
9297}
9298
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009299static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009300{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009301 struct hfi1_pportdata *ppd = dd->pport;
9302 u64 qsfp_mask, cce_int_mask;
9303 const int qsfp1_int_smask = QSFP1_INT % 64;
9304 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009305
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009306 /*
9307 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9308 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9309 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9310 * the index of the appropriate CSR in the CCEIntMask CSR array
9311 */
9312 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9313 (8 * (QSFP1_INT / 64)));
9314 if (dd->hfi1_id) {
9315 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9316 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9317 cce_int_mask);
9318 } else {
9319 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9320 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9321 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009322 }
9323
Mike Marciniszyn77241052015-07-30 15:17:43 -04009324 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9325 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009326 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9327 qsfp_mask);
9328 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9329 qsfp_mask);
9330
9331 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009332
9333 /* Handle active low nature of INT_N and MODPRST_N pins */
9334 if (qsfp_mod_present(ppd))
9335 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9336 write_csr(dd,
9337 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9338 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009339}
9340
Dean Luickbbdeb332015-12-01 15:38:15 -05009341/*
9342 * Do a one-time initialize of the LCB block.
9343 */
9344static void init_lcb(struct hfi1_devdata *dd)
9345{
Dean Luicka59329d2016-02-03 14:32:31 -08009346 /* simulator does not correctly handle LCB cclk loopback, skip */
9347 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9348 return;
9349
Dean Luickbbdeb332015-12-01 15:38:15 -05009350 /* the DC has been reset earlier in the driver load */
9351
9352 /* set LCB for cclk loopback on the port */
9353 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9354 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9355 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9356 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9357 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9358 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9359 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9360}
9361
Mike Marciniszyn77241052015-07-30 15:17:43 -04009362int bringup_serdes(struct hfi1_pportdata *ppd)
9363{
9364 struct hfi1_devdata *dd = ppd->dd;
9365 u64 guid;
9366 int ret;
9367
9368 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9369 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9370
9371 guid = ppd->guid;
9372 if (!guid) {
9373 if (dd->base_guid)
9374 guid = dd->base_guid + ppd->port - 1;
9375 ppd->guid = guid;
9376 }
9377
Mike Marciniszyn77241052015-07-30 15:17:43 -04009378 /* Set linkinit_reason on power up per OPA spec */
9379 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9380
Dean Luickbbdeb332015-12-01 15:38:15 -05009381 /* one-time init of the LCB */
9382 init_lcb(dd);
9383
Mike Marciniszyn77241052015-07-30 15:17:43 -04009384 if (loopback) {
9385 ret = init_loopback(dd);
9386 if (ret < 0)
9387 return ret;
9388 }
9389
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009390 /* tune the SERDES to a ballpark setting for
9391 * optimal signal and bit error rate
9392 * Needs to be done before starting the link
9393 */
9394 tune_serdes(ppd);
9395
Mike Marciniszyn77241052015-07-30 15:17:43 -04009396 return start_link(ppd);
9397}
9398
9399void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9400{
9401 struct hfi1_devdata *dd = ppd->dd;
9402
9403 /*
9404	 * Shut down the link and keep it down. First clear the flag that
9405	 * says the driver wants the link up (driver_link_ready).
9406 * Then make sure the link is not automatically restarted
9407 * (link_enabled). Cancel any pending restart. And finally
9408 * go offline.
9409 */
9410 ppd->driver_link_ready = 0;
9411 ppd->link_enabled = 0;
9412
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009413 ppd->offline_disabled_reason =
9414 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009415 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9416 OPA_LINKDOWN_REASON_SMA_DISABLED);
9417 set_link_state(ppd, HLS_DN_OFFLINE);
9418
9419 /* disable the port */
9420 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9421}
9422
9423static inline int init_cpu_counters(struct hfi1_devdata *dd)
9424{
9425 struct hfi1_pportdata *ppd;
9426 int i;
9427
9428 ppd = (struct hfi1_pportdata *)(dd + 1);
9429 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009430 ppd->ibport_data.rvp.rc_acks = NULL;
9431 ppd->ibport_data.rvp.rc_qacks = NULL;
9432 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9433 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9434 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9435 if (!ppd->ibport_data.rvp.rc_acks ||
9436 !ppd->ibport_data.rvp.rc_delayed_comp ||
9437 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009438 return -ENOMEM;
9439 }
9440
9441 return 0;
9442}
9443
9444static const char * const pt_names[] = {
9445 "expected",
9446 "eager",
9447 "invalid"
9448};
9449
9450static const char *pt_name(u32 type)
9451{
9452 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9453}
9454
9455/*
9456 * index is the index into the receive array
9457 */
9458void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9459 u32 type, unsigned long pa, u16 order)
9460{
9461 u64 reg;
9462 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9463 (dd->kregbase + RCV_ARRAY));
9464
9465 if (!(dd->flags & HFI1_PRESENT))
9466 goto done;
9467
9468 if (type == PT_INVALID) {
9469 pa = 0;
9470 } else if (type > PT_INVALID) {
9471 dd_dev_err(dd,
9472 "unexpected receive array type %u for index %u, not handled\n",
9473 type, index);
9474 goto done;
9475 }
9476
9477 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9478 pt_name(type), index, pa, (unsigned long)order);
9479
9480#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9481 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9482 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9483 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9484 << RCV_ARRAY_RT_ADDR_SHIFT;
9485 writeq(reg, base + (index * 8));
9486
9487 if (type == PT_EAGER)
9488 /*
9489 * Eager entries are written one-by-one so we have to push them
9490 * after we write the entry.
9491 */
9492 flush_wc();
9493done:
9494 return;
9495}
9496
9497void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9498{
9499 struct hfi1_devdata *dd = rcd->dd;
9500 u32 i;
9501
9502 /* this could be optimized */
9503 for (i = rcd->eager_base; i < rcd->eager_base +
9504 rcd->egrbufs.alloced; i++)
9505 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9506
9507 for (i = rcd->expected_base;
9508 i < rcd->expected_base + rcd->expected_count; i++)
9509 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9510}
9511
9512int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9513 struct hfi1_ctxt_info *kinfo)
9514{
9515 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9516 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9517 return 0;
9518}
9519
9520struct hfi1_message_header *hfi1_get_msgheader(
9521 struct hfi1_devdata *dd, __le32 *rhf_addr)
9522{
9523 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9524
9525 return (struct hfi1_message_header *)
9526 (rhf_addr - dd->rhf_offset + offset);
9527}
9528
9529static const char * const ib_cfg_name_strings[] = {
9530 "HFI1_IB_CFG_LIDLMC",
9531 "HFI1_IB_CFG_LWID_DG_ENB",
9532 "HFI1_IB_CFG_LWID_ENB",
9533 "HFI1_IB_CFG_LWID",
9534 "HFI1_IB_CFG_SPD_ENB",
9535 "HFI1_IB_CFG_SPD",
9536 "HFI1_IB_CFG_RXPOL_ENB",
9537 "HFI1_IB_CFG_LREV_ENB",
9538 "HFI1_IB_CFG_LINKLATENCY",
9539 "HFI1_IB_CFG_HRTBT",
9540 "HFI1_IB_CFG_OP_VLS",
9541 "HFI1_IB_CFG_VL_HIGH_CAP",
9542 "HFI1_IB_CFG_VL_LOW_CAP",
9543 "HFI1_IB_CFG_OVERRUN_THRESH",
9544 "HFI1_IB_CFG_PHYERR_THRESH",
9545 "HFI1_IB_CFG_LINKDEFAULT",
9546 "HFI1_IB_CFG_PKEYS",
9547 "HFI1_IB_CFG_MTU",
9548 "HFI1_IB_CFG_LSTATE",
9549 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9550 "HFI1_IB_CFG_PMA_TICKS",
9551 "HFI1_IB_CFG_PORT"
9552};
9553
9554static const char *ib_cfg_name(int which)
9555{
9556 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9557 return "invalid";
9558 return ib_cfg_name_strings[which];
9559}
9560
9561int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9562{
9563 struct hfi1_devdata *dd = ppd->dd;
9564 int val = 0;
9565
9566 switch (which) {
9567 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9568 val = ppd->link_width_enabled;
9569 break;
9570 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9571 val = ppd->link_width_active;
9572 break;
9573 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9574 val = ppd->link_speed_enabled;
9575 break;
9576 case HFI1_IB_CFG_SPD: /* current Link speed */
9577 val = ppd->link_speed_active;
9578 break;
9579
9580 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9581 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9582 case HFI1_IB_CFG_LINKLATENCY:
9583 goto unimplemented;
9584
9585 case HFI1_IB_CFG_OP_VLS:
9586 val = ppd->vls_operational;
9587 break;
9588 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9589 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9590 break;
9591 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9592 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9593 break;
9594 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9595 val = ppd->overrun_threshold;
9596 break;
9597 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9598 val = ppd->phy_error_threshold;
9599 break;
9600 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9601 val = dd->link_default;
9602 break;
9603
9604 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9605 case HFI1_IB_CFG_PMA_TICKS:
9606 default:
9607unimplemented:
9608 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9609 dd_dev_info(
9610 dd,
9611 "%s: which %s: not implemented\n",
9612 __func__,
9613 ib_cfg_name(which));
9614 break;
9615 }
9616
9617 return val;
9618}
9619
9620/*
9621 * The largest MAD packet size.
9622 */
9623#define MAX_MAD_PACKET 2048
9624
9625/*
9626 * Return the maximum header bytes that can go on the _wire_
9627 * for this device. This count includes the ICRC which is
9628 * not part of the packet held in memory but is appended
9629 * by the HW.
9630 * This is dependent on the device's receive header entry size.
9631 * HFI allows this to be set per-receive context, but the
9632 * driver presently enforces a global value.
9633 */
9634u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9635{
9636 /*
9637 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9638 * the Receive Header Entry Size minus the PBC (or RHF) size
9639 * plus one DW for the ICRC appended by HW.
9640 *
9641 * dd->rcd[0].rcvhdrqentsize is in DW.
9642	 * We use rcd[0] as all contexts will have the same value. Also,
9643 * the first kernel context would have been allocated by now so
9644 * we are guaranteed a valid value.
9645 */
9646 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9647}
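/*
 * Worked example (illustrative only; rcvhdrqentsize is whatever was
 * configured for the kernel receive contexts): with a receive header
 * entry size of 32 DW, the maximum wire header is
 * (32 - 2 + 1) * 4 = 124 bytes, i.e. 30 DW of header plus the
 * HW-appended ICRC DW.
 */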
9648
9649/*
9650 * Set Send Length
9651 * @ppd - per port data
9652 *
9653 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9654 * registers compare against LRH.PktLen, so use the max bytes included
9655 * in the LRH.
9656 *
9657 * This routine changes all VL values except VL15, which it maintains at
9658 * the same value.
9659 */
9660static void set_send_length(struct hfi1_pportdata *ppd)
9661{
9662 struct hfi1_devdata *dd = ppd->dd;
9663	u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9664	u32 maxvlmtu = dd->vld[15].mtu;
9665	u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9666 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9667 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9668 int i;
9669
9670 for (i = 0; i < ppd->vls_supported; i++) {
9671 if (dd->vld[i].mtu > maxvlmtu)
9672 maxvlmtu = dd->vld[i].mtu;
9673 if (i <= 3)
9674 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9675 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9676 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9677 else
9678 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9679 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9680 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9681 }
9682 write_csr(dd, SEND_LEN_CHECK0, len1);
9683 write_csr(dd, SEND_LEN_CHECK1, len2);
9684 /* adjust kernel credit return thresholds based on new MTUs */
9685 /* all kernel receive contexts have the same hdrqentsize */
9686 for (i = 0; i < ppd->vls_supported; i++) {
9687 sc_set_cr_threshold(dd->vld[i].sc,
9688 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9689 dd->rcd[0]->rcvhdrqentsize));
9690 }
9691 sc_set_cr_threshold(dd->vld[15].sc,
9692 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9693 dd->rcd[0]->rcvhdrqentsize));
9694
9695 /* Adjust maximum MTU for the port in DC */
9696 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9697 (ilog2(maxvlmtu >> 8) + 1);
9698 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9699 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9700 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9701 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9702 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9703}
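/*
 * Illustrative arithmetic for the DCC MTU cap encoding above (example
 * values only): a maximum VL MTU of 8192 bytes encodes as
 * ilog2(8192 >> 8) + 1 = ilog2(32) + 1 = 6, while the special
 * 10240-byte MTU uses the dedicated DCC_CFG_PORT_MTU_CAP_10240
 * encoding instead of the power-of-two formula.
 */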
9704
9705static void set_lidlmc(struct hfi1_pportdata *ppd)
9706{
9707 int i;
9708 u64 sreg = 0;
9709 struct hfi1_devdata *dd = ppd->dd;
9710 u32 mask = ~((1U << ppd->lmc) - 1);
9711 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9712
9713 if (dd->hfi1_snoop.mode_flag)
9714 dd_dev_info(dd, "Set lid/lmc while snooping");
9715
9716 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9717 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9718 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9719			<< DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9720	      ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9721 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9722 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9723
9724 /*
9725 * Iterate over all the send contexts and set their SLID check
9726 */
9727 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9728 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9729 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9730 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9731
9732 for (i = 0; i < dd->chip_send_contexts; i++) {
9733 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9734 i, (u32)sreg);
9735 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9736 }
9737
9738 /* Now we have to do the same thing for the sdma engines */
9739 sdma_update_lmc(dd, mask, ppd->lid);
9740}
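/*
 * Worked example for the LMC masking above (example values only):
 * with lmc = 2, mask = ~((1 << 2) - 1) = ~0x3, so the low two LID
 * bits are "don't care" in the DLID and SLID checks; a port with
 * base LID 0x1000 then matches LIDs 0x1000 through 0x1003.
 */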
9741
9742static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9743{
9744 unsigned long timeout;
9745 u32 curr_state;
9746
9747 timeout = jiffies + msecs_to_jiffies(msecs);
9748 while (1) {
9749 curr_state = read_physical_state(dd);
9750 if (curr_state == state)
9751 break;
9752 if (time_after(jiffies, timeout)) {
9753 dd_dev_err(dd,
9754 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9755 state, curr_state);
9756 return -ETIMEDOUT;
9757 }
9758 usleep_range(1950, 2050); /* sleep 2ms-ish */
9759 }
9760
9761 return 0;
9762}
9763
9764/*
9765 * Helper for set_link_state(). Do not call except from that routine.
9766 * Expects ppd->hls_mutex to be held.
9767 *
9768 * @rem_reason value to be sent to the neighbor
9769 *
9770 * LinkDownReasons only set if transition succeeds.
9771 */
9772static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9773{
9774 struct hfi1_devdata *dd = ppd->dd;
9775 u32 pstate, previous_state;
9776 u32 last_local_state;
9777 u32 last_remote_state;
9778 int ret;
9779 int do_transition;
9780 int do_wait;
9781
9782 previous_state = ppd->host_link_state;
9783 ppd->host_link_state = HLS_GOING_OFFLINE;
9784 pstate = read_physical_state(dd);
9785 if (pstate == PLS_OFFLINE) {
9786 do_transition = 0; /* in right state */
9787 do_wait = 0; /* ...no need to wait */
9788 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9789 do_transition = 0; /* in an offline transient state */
9790 do_wait = 1; /* ...wait for it to settle */
9791 } else {
9792 do_transition = 1; /* need to move to offline */
9793 do_wait = 1; /* ...will need to wait */
9794 }
9795
9796 if (do_transition) {
9797 ret = set_physical_link_state(dd,
9798 PLS_OFFLINE | (rem_reason << 8));
9799
9800 if (ret != HCMD_SUCCESS) {
9801 dd_dev_err(dd,
9802 "Failed to transition to Offline link state, return %d\n",
9803 ret);
9804 return -EINVAL;
9805 }
9806		if (ppd->offline_disabled_reason ==
9807		    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9808			ppd->offline_disabled_reason =
9809			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9810	}
9811
9812 if (do_wait) {
9813 /* it can take a while for the link to go down */
9814		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9815		if (ret < 0)
9816 return ret;
9817 }
9818
9819 /* make sure the logical state is also down */
9820 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9821
9822 /*
9823 * Now in charge of LCB - must be after the physical state is
9824 * offline.quiet and before host_link_state is changed.
9825 */
9826 set_host_lcb_access(dd);
9827 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9828 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9829
9830	if (ppd->port_type == PORT_TYPE_QSFP &&
9831 ppd->qsfp_info.limiting_active &&
9832 qsfp_mod_present(ppd)) {
9833 set_qsfp_tx(ppd, 0);
9834 }
9835
9836	/*
9837 * The LNI has a mandatory wait time after the physical state
9838 * moves to Offline.Quiet. The wait time may be different
9839 * depending on how the link went down. The 8051 firmware
9840 * will observe the needed wait time and only move to ready
9841 * when that is completed. The largest of the quiet timeouts
9842	 * is 6s, so wait that long and then at least 0.5s more for
9843	 * other transitions, and another 0.5s for a buffer.
9844	 */
9845	ret = wait_fm_ready(dd, 7000);
9846	if (ret) {
9847 dd_dev_err(dd,
9848 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9849 /* state is really offline, so make it so */
9850 ppd->host_link_state = HLS_DN_OFFLINE;
9851 return ret;
9852 }
9853
9854 /*
9855 * The state is now offline and the 8051 is ready to accept host
9856 * requests.
9857 * - change our state
9858 * - notify others if we were previously in a linkup state
9859 */
9860 ppd->host_link_state = HLS_DN_OFFLINE;
9861 if (previous_state & HLS_UP) {
9862 /* went down while link was up */
9863 handle_linkup_change(dd, 0);
9864 } else if (previous_state
9865 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9866 /* went down while attempting link up */
9867 /* byte 1 of last_*_state is the failure reason */
9868 read_last_local_state(dd, &last_local_state);
9869 read_last_remote_state(dd, &last_remote_state);
9870 dd_dev_err(dd,
9871 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9872 last_local_state, last_remote_state);
9873 }
9874
9875 /* the active link width (downgrade) is 0 on link down */
9876 ppd->link_width_active = 0;
9877 ppd->link_width_downgrade_tx_active = 0;
9878 ppd->link_width_downgrade_rx_active = 0;
9879 ppd->current_egress_rate = 0;
9880 return 0;
9881}
9882
9883/* return the link state name */
9884static const char *link_state_name(u32 state)
9885{
9886 const char *name;
9887 int n = ilog2(state);
9888 static const char * const names[] = {
9889 [__HLS_UP_INIT_BP] = "INIT",
9890 [__HLS_UP_ARMED_BP] = "ARMED",
9891 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9892 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9893 [__HLS_DN_POLL_BP] = "POLL",
9894 [__HLS_DN_DISABLE_BP] = "DISABLE",
9895 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9896 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9897 [__HLS_GOING_UP_BP] = "GOING_UP",
9898 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9899 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9900 };
9901
9902 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9903 return name ? name : "unknown";
9904}
9905
9906/* return the link state reason name */
9907static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9908{
9909 if (state == HLS_UP_INIT) {
9910 switch (ppd->linkinit_reason) {
9911 case OPA_LINKINIT_REASON_LINKUP:
9912 return "(LINKUP)";
9913 case OPA_LINKINIT_REASON_FLAPPING:
9914 return "(FLAPPING)";
9915 case OPA_LINKINIT_OUTSIDE_POLICY:
9916 return "(OUTSIDE_POLICY)";
9917 case OPA_LINKINIT_QUARANTINED:
9918 return "(QUARANTINED)";
9919 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9920 return "(INSUFIC_CAPABILITY)";
9921 default:
9922 break;
9923 }
9924 }
9925 return "";
9926}
9927
9928/*
9929 * driver_physical_state - convert the driver's notion of a port's
9930 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9931 * Return -1 (converted to a u32) to indicate error.
9932 */
9933u32 driver_physical_state(struct hfi1_pportdata *ppd)
9934{
9935 switch (ppd->host_link_state) {
9936 case HLS_UP_INIT:
9937 case HLS_UP_ARMED:
9938 case HLS_UP_ACTIVE:
9939 return IB_PORTPHYSSTATE_LINKUP;
9940 case HLS_DN_POLL:
9941 return IB_PORTPHYSSTATE_POLLING;
9942 case HLS_DN_DISABLE:
9943 return IB_PORTPHYSSTATE_DISABLED;
9944 case HLS_DN_OFFLINE:
9945 return OPA_PORTPHYSSTATE_OFFLINE;
9946 case HLS_VERIFY_CAP:
9947 return IB_PORTPHYSSTATE_POLLING;
9948 case HLS_GOING_UP:
9949 return IB_PORTPHYSSTATE_POLLING;
9950 case HLS_GOING_OFFLINE:
9951 return OPA_PORTPHYSSTATE_OFFLINE;
9952 case HLS_LINK_COOLDOWN:
9953 return OPA_PORTPHYSSTATE_OFFLINE;
9954 case HLS_DN_DOWNDEF:
9955 default:
9956 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9957 ppd->host_link_state);
9958 return -1;
9959 }
9960}
9961
9962/*
9963 * driver_logical_state - convert the driver's notion of a port's
9964 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9965 * (converted to a u32) to indicate error.
9966 */
9967u32 driver_logical_state(struct hfi1_pportdata *ppd)
9968{
9969 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9970 return IB_PORT_DOWN;
9971
9972 switch (ppd->host_link_state & HLS_UP) {
9973 case HLS_UP_INIT:
9974 return IB_PORT_INIT;
9975 case HLS_UP_ARMED:
9976 return IB_PORT_ARMED;
9977 case HLS_UP_ACTIVE:
9978 return IB_PORT_ACTIVE;
9979 default:
9980 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9981 ppd->host_link_state);
9982 return -1;
9983 }
9984}
9985
9986void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9987 u8 neigh_reason, u8 rem_reason)
9988{
9989 if (ppd->local_link_down_reason.latest == 0 &&
9990 ppd->neigh_link_down_reason.latest == 0) {
9991 ppd->local_link_down_reason.latest = lcl_reason;
9992 ppd->neigh_link_down_reason.latest = neigh_reason;
9993 ppd->remote_link_down_reason = rem_reason;
9994 }
9995}
9996
9997/*
9998 * Change the physical and/or logical link state.
9999 *
10000 * Do not call this routine while inside an interrupt. It contains
10001 * calls to routines that can take multiple seconds to finish.
10002 *
10003 * Returns 0 on success, -errno on failure.
10004 */
10005int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10006{
10007 struct hfi1_devdata *dd = ppd->dd;
10008 struct ib_event event = {.device = NULL};
10009 int ret1, ret = 0;
10010 int was_up, is_down;
10011 int orig_new_state, poll_bounce;
10012
10013 mutex_lock(&ppd->hls_lock);
10014
10015 orig_new_state = state;
10016 if (state == HLS_DN_DOWNDEF)
10017 state = dd->link_default;
10018
10019 /* interpret poll -> poll as a link bounce */
10020	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10021		      state == HLS_DN_POLL;
10022
10023 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10024 link_state_name(ppd->host_link_state),
10025 link_state_name(orig_new_state),
10026 poll_bounce ? "(bounce) " : "",
10027 link_state_reason_name(ppd, state));
10028
10029 was_up = !!(ppd->host_link_state & HLS_UP);
10030
10031 /*
10032 * If we're going to a (HLS_*) link state that implies the logical
10033 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10034 * reset is_sm_config_started to 0.
10035 */
10036 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10037 ppd->is_sm_config_started = 0;
10038
10039 /*
10040 * Do nothing if the states match. Let a poll to poll link bounce
10041 * go through.
10042 */
10043 if (ppd->host_link_state == state && !poll_bounce)
10044 goto done;
10045
10046 switch (state) {
10047 case HLS_UP_INIT:
10048		if (ppd->host_link_state == HLS_DN_POLL &&
10049		    (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10050			/*
10051 * Quick link up jumps from polling to here.
10052 *
10053 * Whether in normal or loopback mode, the
10054 * simulator jumps from polling to link up.
10055 * Accept that here.
10056 */
10057 /* OK */;
10058 } else if (ppd->host_link_state != HLS_GOING_UP) {
10059 goto unexpected;
10060 }
10061
10062 ppd->host_link_state = HLS_UP_INIT;
10063 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10064 if (ret) {
10065 /* logical state didn't change, stay at going_up */
10066 ppd->host_link_state = HLS_GOING_UP;
10067 dd_dev_err(dd,
10068 "%s: logical state did not change to INIT\n",
10069 __func__);
10070 } else {
10071 /* clear old transient LINKINIT_REASON code */
10072 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10073 ppd->linkinit_reason =
10074 OPA_LINKINIT_REASON_LINKUP;
10075
10076 /* enable the port */
10077 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10078
10079 handle_linkup_change(dd, 1);
10080 }
10081 break;
10082 case HLS_UP_ARMED:
10083 if (ppd->host_link_state != HLS_UP_INIT)
10084 goto unexpected;
10085
10086 ppd->host_link_state = HLS_UP_ARMED;
10087 set_logical_state(dd, LSTATE_ARMED);
10088 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10089 if (ret) {
10090 /* logical state didn't change, stay at init */
10091 ppd->host_link_state = HLS_UP_INIT;
10092 dd_dev_err(dd,
10093 "%s: logical state did not change to ARMED\n",
10094 __func__);
10095 }
10096 /*
10097 * The simulator does not currently implement SMA messages,
10098 * so neighbor_normal is not set. Set it here when we first
10099 * move to Armed.
10100 */
10101 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10102 ppd->neighbor_normal = 1;
10103 break;
10104 case HLS_UP_ACTIVE:
10105 if (ppd->host_link_state != HLS_UP_ARMED)
10106 goto unexpected;
10107
10108 ppd->host_link_state = HLS_UP_ACTIVE;
10109 set_logical_state(dd, LSTATE_ACTIVE);
10110 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10111 if (ret) {
10112 /* logical state didn't change, stay at armed */
10113 ppd->host_link_state = HLS_UP_ARMED;
10114 dd_dev_err(dd,
10115 "%s: logical state did not change to ACTIVE\n",
10116 __func__);
10117 } else {
10118			/* tell all engines to go running */
10119 sdma_all_running(dd);
10120
10121			/* Signal the IB layer that the port has gone active */
10122			event.device = &dd->verbs_dev.rdi.ibdev;
10123			event.element.port_num = ppd->port;
10124 event.event = IB_EVENT_PORT_ACTIVE;
10125 }
10126 break;
10127 case HLS_DN_POLL:
10128 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10129 ppd->host_link_state == HLS_DN_OFFLINE) &&
10130 dd->dc_shutdown)
10131 dc_start(dd);
10132 /* Hand LED control to the DC */
10133 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10134
10135 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10136 u8 tmp = ppd->link_enabled;
10137
10138 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10139 if (ret) {
10140 ppd->link_enabled = tmp;
10141 break;
10142 }
10143 ppd->remote_link_down_reason = 0;
10144
10145 if (ppd->driver_link_ready)
10146 ppd->link_enabled = 1;
10147 }
10148
10149		set_all_slowpath(ppd->dd);
10150		ret = set_local_link_attributes(ppd);
10151 if (ret)
10152 break;
10153
10154 ppd->port_error_action = 0;
10155 ppd->host_link_state = HLS_DN_POLL;
10156
10157 if (quick_linkup) {
10158 /* quick linkup does not go into polling */
10159 ret = do_quick_linkup(dd);
10160 } else {
10161 ret1 = set_physical_link_state(dd, PLS_POLLING);
10162 if (ret1 != HCMD_SUCCESS) {
10163 dd_dev_err(dd,
10164 "Failed to transition to Polling link state, return 0x%x\n",
10165 ret1);
10166 ret = -EINVAL;
10167 }
10168 }
10169		ppd->offline_disabled_reason =
10170			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10171		/*
10172 * If an error occurred above, go back to offline. The
10173 * caller may reschedule another attempt.
10174 */
10175 if (ret)
10176 goto_offline(ppd, 0);
10177 break;
10178 case HLS_DN_DISABLE:
10179 /* link is disabled */
10180 ppd->link_enabled = 0;
10181
10182 /* allow any state to transition to disabled */
10183
10184 /* must transition to offline first */
10185 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10186 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10187 if (ret)
10188 break;
10189 ppd->remote_link_down_reason = 0;
10190 }
10191
10192 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10193 if (ret1 != HCMD_SUCCESS) {
10194 dd_dev_err(dd,
10195 "Failed to transition to Disabled link state, return 0x%x\n",
10196 ret1);
10197 ret = -EINVAL;
10198 break;
10199 }
10200 ppd->host_link_state = HLS_DN_DISABLE;
10201 dc_shutdown(dd);
10202 break;
10203 case HLS_DN_OFFLINE:
10204 if (ppd->host_link_state == HLS_DN_DISABLE)
10205 dc_start(dd);
10206
10207 /* allow any state to transition to offline */
10208 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10209 if (!ret)
10210 ppd->remote_link_down_reason = 0;
10211 break;
10212 case HLS_VERIFY_CAP:
10213 if (ppd->host_link_state != HLS_DN_POLL)
10214 goto unexpected;
10215 ppd->host_link_state = HLS_VERIFY_CAP;
10216 break;
10217 case HLS_GOING_UP:
10218 if (ppd->host_link_state != HLS_VERIFY_CAP)
10219 goto unexpected;
10220
10221 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10222 if (ret1 != HCMD_SUCCESS) {
10223 dd_dev_err(dd,
10224 "Failed to transition to link up state, return 0x%x\n",
10225 ret1);
10226 ret = -EINVAL;
10227 break;
10228 }
10229 ppd->host_link_state = HLS_GOING_UP;
10230 break;
10231
10232 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10233 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10234 default:
10235 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10236 __func__, state);
10237 ret = -EINVAL;
10238 break;
10239 }
10240
10241 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10242 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10243
10244 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10245 ppd->neigh_link_down_reason.sma == 0) {
10246 ppd->local_link_down_reason.sma =
10247 ppd->local_link_down_reason.latest;
10248 ppd->neigh_link_down_reason.sma =
10249 ppd->neigh_link_down_reason.latest;
10250 }
10251
10252 goto done;
10253
10254unexpected:
10255 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10256 __func__, link_state_name(ppd->host_link_state),
10257 link_state_name(state));
10258 ret = -EINVAL;
10259
10260done:
10261 mutex_unlock(&ppd->hls_lock);
10262
10263 if (event.device)
10264 ib_dispatch_event(&event);
10265
10266 return ret;
10267}
10268
10269int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10270{
10271 u64 reg;
10272 int ret = 0;
10273
10274 switch (which) {
10275 case HFI1_IB_CFG_LIDLMC:
10276 set_lidlmc(ppd);
10277 break;
10278 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10279 /*
10280 * The VL Arbitrator high limit is sent in units of 4k
10281 * bytes, while HFI stores it in units of 64 bytes.
10282 */
10283		val *= 4096 / 64;
10284		reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10285 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10286 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10287 break;
10288 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10289 /* HFI only supports POLL as the default link down state */
10290 if (val != HLS_DN_POLL)
10291 ret = -EINVAL;
10292 break;
10293 case HFI1_IB_CFG_OP_VLS:
10294 if (ppd->vls_operational != val) {
10295 ppd->vls_operational = val;
10296 if (!ppd->port)
10297 ret = -EINVAL;
10298		}
10299 break;
10300 /*
10301 * For link width, link width downgrade, and speed enable, always AND
10302 * the setting with what is actually supported. This has two benefits.
10303 * First, enabled can't have unsupported values, no matter what the
10304 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10305 * "fill in with your supported value" have all the bits in the
10306 * field set, so simply ANDing with supported has the desired result.
10307 */
10308 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10309 ppd->link_width_enabled = val & ppd->link_width_supported;
10310 break;
10311 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10312 ppd->link_width_downgrade_enabled =
10313 val & ppd->link_width_downgrade_supported;
10314 break;
10315 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10316 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10317 break;
10318 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10319 /*
10320 * HFI does not follow IB specs, save this value
10321 * so we can report it, if asked.
10322 */
10323 ppd->overrun_threshold = val;
10324 break;
10325 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10326 /*
10327 * HFI does not follow IB specs, save this value
10328 * so we can report it, if asked.
10329 */
10330 ppd->phy_error_threshold = val;
10331 break;
10332
10333 case HFI1_IB_CFG_MTU:
10334 set_send_length(ppd);
10335 break;
10336
10337 case HFI1_IB_CFG_PKEYS:
10338 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10339 set_partition_keys(ppd);
10340 break;
10341
10342 default:
10343 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10344 dd_dev_info(ppd->dd,
10345 "%s: which %s, val 0x%x: not implemented\n",
10346 __func__, ib_cfg_name(which), val);
10347 break;
10348 }
10349 return ret;
10350}
10351
10352/* begin functions related to vl arbitration table caching */
10353static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10354{
10355 int i;
10356
10357 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10358 VL_ARB_LOW_PRIO_TABLE_SIZE);
10359 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10360 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10361
10362 /*
10363 * Note that we always return values directly from the
10364 * 'vl_arb_cache' (and do no CSR reads) in response to a
10365 * 'Get(VLArbTable)'. This is obviously correct after a
10366 * 'Set(VLArbTable)', since the cache will then be up to
10367 * date. But it's also correct prior to any 'Set(VLArbTable)'
10368 * since then both the cache, and the relevant h/w registers
10369 * will be zeroed.
10370 */
10371
10372 for (i = 0; i < MAX_PRIO_TABLE; i++)
10373 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10374}
10375
10376/*
10377 * vl_arb_lock_cache
10378 *
10379 * All other vl_arb_* functions should be called only after locking
10380 * the cache.
10381 */
10382static inline struct vl_arb_cache *
10383vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10384{
10385 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10386 return NULL;
10387 spin_lock(&ppd->vl_arb_cache[idx].lock);
10388 return &ppd->vl_arb_cache[idx];
10389}
10390
10391static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10392{
10393 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10394}
10395
10396static void vl_arb_get_cache(struct vl_arb_cache *cache,
10397 struct ib_vl_weight_elem *vl)
10398{
10399 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10400}
10401
10402static void vl_arb_set_cache(struct vl_arb_cache *cache,
10403 struct ib_vl_weight_elem *vl)
10404{
10405 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10406}
10407
10408static int vl_arb_match_cache(struct vl_arb_cache *cache,
10409 struct ib_vl_weight_elem *vl)
10410{
10411 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10412}
10413/* end functions related to vl arbitration table caching */
10414
10415static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10416 u32 size, struct ib_vl_weight_elem *vl)
10417{
10418 struct hfi1_devdata *dd = ppd->dd;
10419 u64 reg;
10420 unsigned int i, is_up = 0;
10421 int drain, ret = 0;
10422
10423 mutex_lock(&ppd->hls_lock);
10424
10425 if (ppd->host_link_state & HLS_UP)
10426 is_up = 1;
10427
10428 drain = !is_ax(dd) && is_up;
10429
10430 if (drain)
10431 /*
10432 * Before adjusting VL arbitration weights, empty per-VL
10433 * FIFOs, otherwise a packet whose VL weight is being
10434 * set to 0 could get stuck in a FIFO with no chance to
10435 * egress.
10436 */
10437 ret = stop_drain_data_vls(dd);
10438
10439 if (ret) {
10440 dd_dev_err(
10441 dd,
10442 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10443 __func__);
10444 goto err;
10445 }
10446
10447 for (i = 0; i < size; i++, vl++) {
10448 /*
10449 * NOTE: The low priority shift and mask are used here, but
10450 * they are the same for both the low and high registers.
10451 */
10452 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10453 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10454 | (((u64)vl->weight
10455 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10456 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10457 write_csr(dd, target + (i * 8), reg);
10458 }
10459 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10460
10461 if (drain)
10462 open_fill_data_vls(dd); /* reopen all VLs */
10463
10464err:
10465 mutex_unlock(&ppd->hls_lock);
10466
10467 return ret;
10468}
10469
10470/*
10471 * Read one credit merge VL register.
10472 */
10473static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10474 struct vl_limit *vll)
10475{
10476 u64 reg = read_csr(dd, csr);
10477
10478 vll->dedicated = cpu_to_be16(
10479 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10480 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10481 vll->shared = cpu_to_be16(
10482 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10483 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10484}
10485
10486/*
10487 * Read the current credit merge limits.
10488 */
10489static int get_buffer_control(struct hfi1_devdata *dd,
10490 struct buffer_control *bc, u16 *overall_limit)
10491{
10492 u64 reg;
10493 int i;
10494
10495 /* not all entries are filled in */
10496 memset(bc, 0, sizeof(*bc));
10497
10498 /* OPA and HFI have a 1-1 mapping */
10499 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10500		read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10501
10502 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10503 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10504
10505 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10506 bc->overall_shared_limit = cpu_to_be16(
10507 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10508 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10509 if (overall_limit)
10510 *overall_limit = (reg
10511 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10512 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10513 return sizeof(struct buffer_control);
10514}
10515
10516static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10517{
10518 u64 reg;
10519 int i;
10520
10521 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10522 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10523 for (i = 0; i < sizeof(u64); i++) {
10524 u8 byte = *(((u8 *)&reg) + i);
10525
10526 dp->vlnt[2 * i] = byte & 0xf;
10527 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10528 }
10529
10530 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10531 for (i = 0; i < sizeof(u64); i++) {
10532 u8 byte = *(((u8 *)&reg) + i);
10533
10534 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10535 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10536 }
10537 return sizeof(struct sc2vlnt);
10538}
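/*
 * Illustration of the nibble unpacking in get_sc2vlnt() (made-up
 * register value): if the low byte of DCC_CFG_SC_VL_TABLE_15_0 reads
 * 0x31, then vlnt[0] = 0x31 & 0xf = 1 and vlnt[1] = (0x31 & 0xf0) >> 4
 * = 3, i.e. SC0 maps to VLnt 1 and SC1 maps to VLnt 3.
 */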
10539
10540static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10541 struct ib_vl_weight_elem *vl)
10542{
10543 unsigned int i;
10544
10545 for (i = 0; i < nelems; i++, vl++) {
10546 vl->vl = 0xf;
10547 vl->weight = 0;
10548 }
10549}
10550
10551static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10552{
10553 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10554 DC_SC_VL_VAL(15_0,
10555 0, dp->vlnt[0] & 0xf,
10556 1, dp->vlnt[1] & 0xf,
10557 2, dp->vlnt[2] & 0xf,
10558 3, dp->vlnt[3] & 0xf,
10559 4, dp->vlnt[4] & 0xf,
10560 5, dp->vlnt[5] & 0xf,
10561 6, dp->vlnt[6] & 0xf,
10562 7, dp->vlnt[7] & 0xf,
10563 8, dp->vlnt[8] & 0xf,
10564 9, dp->vlnt[9] & 0xf,
10565 10, dp->vlnt[10] & 0xf,
10566 11, dp->vlnt[11] & 0xf,
10567 12, dp->vlnt[12] & 0xf,
10568 13, dp->vlnt[13] & 0xf,
10569 14, dp->vlnt[14] & 0xf,
10570 15, dp->vlnt[15] & 0xf));
10571 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10572 DC_SC_VL_VAL(31_16,
10573 16, dp->vlnt[16] & 0xf,
10574 17, dp->vlnt[17] & 0xf,
10575 18, dp->vlnt[18] & 0xf,
10576 19, dp->vlnt[19] & 0xf,
10577 20, dp->vlnt[20] & 0xf,
10578 21, dp->vlnt[21] & 0xf,
10579 22, dp->vlnt[22] & 0xf,
10580 23, dp->vlnt[23] & 0xf,
10581 24, dp->vlnt[24] & 0xf,
10582 25, dp->vlnt[25] & 0xf,
10583 26, dp->vlnt[26] & 0xf,
10584 27, dp->vlnt[27] & 0xf,
10585 28, dp->vlnt[28] & 0xf,
10586 29, dp->vlnt[29] & 0xf,
10587 30, dp->vlnt[30] & 0xf,
10588 31, dp->vlnt[31] & 0xf));
10589}
10590
10591static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10592 u16 limit)
10593{
10594 if (limit != 0)
10595 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10596 what, (int)limit, idx);
10597}
10598
10599/* change only the shared limit portion of SendCmGlobalCredit */
10600static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10601{
10602 u64 reg;
10603
10604 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10605 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10606 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10607 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10608}
10609
10610/* change only the total credit limit portion of SendCmGLobalCredit */
10611static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10612{
10613 u64 reg;
10614
10615 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10616 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10617 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10618 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10619}
10620
10621/* set the given per-VL shared limit */
10622static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10623{
10624 u64 reg;
10625 u32 addr;
10626
10627 if (vl < TXE_NUM_DATA_VL)
10628 addr = SEND_CM_CREDIT_VL + (8 * vl);
10629 else
10630 addr = SEND_CM_CREDIT_VL15;
10631
10632 reg = read_csr(dd, addr);
10633 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10634 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10635 write_csr(dd, addr, reg);
10636}
10637
10638/* set the given per-VL dedicated limit */
10639static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10640{
10641 u64 reg;
10642 u32 addr;
10643
10644 if (vl < TXE_NUM_DATA_VL)
10645 addr = SEND_CM_CREDIT_VL + (8 * vl);
10646 else
10647 addr = SEND_CM_CREDIT_VL15;
10648
10649 reg = read_csr(dd, addr);
10650 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10651 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10652 write_csr(dd, addr, reg);
10653}
10654
10655/* spin until the given per-VL status mask bits clear */
10656static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10657 const char *which)
10658{
10659 unsigned long timeout;
10660 u64 reg;
10661
10662 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10663 while (1) {
10664 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10665
10666 if (reg == 0)
10667 return; /* success */
10668 if (time_after(jiffies, timeout))
10669 break; /* timed out */
10670 udelay(1);
10671 }
10672
10673 dd_dev_err(dd,
10674 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10675 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10676 /*
10677 * If this occurs, it is likely there was a credit loss on the link.
10678 * The only recovery from that is a link bounce.
10679 */
10680 dd_dev_err(dd,
10681 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10682}
10683
10684/*
10685 * The number of credits on the VLs may be changed while everything
10686 * is "live", but the following algorithm must be followed due to
10687 * how the hardware is actually implemented. In particular,
10688 * Return_Credit_Status[] is the only correct status check.
10689 *
10690 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10691 * set Global_Shared_Credit_Limit = 0
10692 * use_all_vl = 1
10693 * mask0 = all VLs that are changing either dedicated or shared limits
10694 * set Shared_Limit[mask0] = 0
10695 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10696 * if (changing any dedicated limit)
10697 * mask1 = all VLs that are lowering dedicated limits
10698 * lower Dedicated_Limit[mask1]
10699 * spin until Return_Credit_Status[mask1] == 0
10700 * raise Dedicated_Limits
10701 * raise Shared_Limits
10702 * raise Global_Shared_Credit_Limit
10703 *
10704 * lower = if the new limit is lower, set the limit to the new value
10705 * raise = if the new limit is higher than the current value (may be changed
10706 * earlier in the algorithm), set the new limit to the new value
10707 */
10708int set_buffer_control(struct hfi1_pportdata *ppd,
10709		       struct buffer_control *new_bc)
10710{
10711	struct hfi1_devdata *dd = ppd->dd;
10712	u64 changing_mask, ld_mask, stat_mask;
10713 int change_count;
10714 int i, use_all_mask;
10715 int this_shared_changing;
10716	int vl_count = 0, ret;
10717	/*
10718 * A0: add the variable any_shared_limit_changing below and in the
10719 * algorithm above. If removing A0 support, it can be removed.
10720 */
10721 int any_shared_limit_changing;
10722 struct buffer_control cur_bc;
10723 u8 changing[OPA_MAX_VLS];
10724 u8 lowering_dedicated[OPA_MAX_VLS];
10725 u16 cur_total;
10726 u32 new_total = 0;
10727 const u64 all_mask =
10728 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10729 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10730 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10731 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10732 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10733 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10734 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10735 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10736 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10737
10738#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10739#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10740
10741	/* find the new total credits, do sanity check on unused VLs */
10742 for (i = 0; i < OPA_MAX_VLS; i++) {
10743 if (valid_vl(i)) {
10744 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10745 continue;
10746 }
10747 nonzero_msg(dd, i, "dedicated",
10748 be16_to_cpu(new_bc->vl[i].dedicated));
10749 nonzero_msg(dd, i, "shared",
10750 be16_to_cpu(new_bc->vl[i].shared));
10751 new_bc->vl[i].dedicated = 0;
10752 new_bc->vl[i].shared = 0;
10753 }
10754 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10755
10756	/* fetch the current values */
10757 get_buffer_control(dd, &cur_bc, &cur_total);
10758
10759 /*
10760 * Create the masks we will use.
10761 */
10762 memset(changing, 0, sizeof(changing));
10763 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10764 /* NOTE: Assumes that the individual VL bits are adjacent and in
10765 increasing order */
10766 stat_mask =
10767 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10768 changing_mask = 0;
10769 ld_mask = 0;
10770 change_count = 0;
10771 any_shared_limit_changing = 0;
10772 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10773 if (!valid_vl(i))
10774 continue;
10775 this_shared_changing = new_bc->vl[i].shared
10776 != cur_bc.vl[i].shared;
10777 if (this_shared_changing)
10778 any_shared_limit_changing = 1;
10779		if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10780		    this_shared_changing) {
10781			changing[i] = 1;
10782 changing_mask |= stat_mask;
10783 change_count++;
10784 }
10785 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10786 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10787 lowering_dedicated[i] = 1;
10788 ld_mask |= stat_mask;
10789 }
10790 }
10791
10792 /* bracket the credit change with a total adjustment */
10793 if (new_total > cur_total)
10794 set_global_limit(dd, new_total);
10795
10796 /*
10797 * Start the credit change algorithm.
10798 */
10799 use_all_mask = 0;
10800 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10801	     be16_to_cpu(cur_bc.overall_shared_limit)) ||
10802	    (is_ax(dd) && any_shared_limit_changing)) {
10803		set_global_shared(dd, 0);
10804 cur_bc.overall_shared_limit = 0;
10805 use_all_mask = 1;
10806 }
10807
10808 for (i = 0; i < NUM_USABLE_VLS; i++) {
10809 if (!valid_vl(i))
10810 continue;
10811
10812 if (changing[i]) {
10813 set_vl_shared(dd, i, 0);
10814 cur_bc.vl[i].shared = 0;
10815 }
10816 }
10817
10818 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10819 "shared");
10820
10821 if (change_count > 0) {
10822 for (i = 0; i < NUM_USABLE_VLS; i++) {
10823 if (!valid_vl(i))
10824 continue;
10825
10826 if (lowering_dedicated[i]) {
10827 set_vl_dedicated(dd, i,
10828 be16_to_cpu(new_bc->vl[i].dedicated));
10829 cur_bc.vl[i].dedicated =
10830 new_bc->vl[i].dedicated;
10831 }
10832 }
10833
10834 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10835
10836 /* now raise all dedicated that are going up */
10837 for (i = 0; i < NUM_USABLE_VLS; i++) {
10838 if (!valid_vl(i))
10839 continue;
10840
10841 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10842 be16_to_cpu(cur_bc.vl[i].dedicated))
10843 set_vl_dedicated(dd, i,
10844 be16_to_cpu(new_bc->vl[i].dedicated));
10845 }
10846 }
10847
10848 /* next raise all shared that are going up */
10849 for (i = 0; i < NUM_USABLE_VLS; i++) {
10850 if (!valid_vl(i))
10851 continue;
10852
10853 if (be16_to_cpu(new_bc->vl[i].shared) >
10854 be16_to_cpu(cur_bc.vl[i].shared))
10855 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10856 }
10857
10858 /* finally raise the global shared */
10859 if (be16_to_cpu(new_bc->overall_shared_limit) >
10860 be16_to_cpu(cur_bc.overall_shared_limit))
10861 set_global_shared(dd,
10862 be16_to_cpu(new_bc->overall_shared_limit));
10863
10864 /* bracket the credit change with a total adjustment */
10865 if (new_total < cur_total)
10866 set_global_limit(dd, new_total);
10867
10868 /*
10869 * Determine the actual number of operational VLS using the number of
10870 * dedicated and shared credits for each VL.
10871 */
10872 if (change_count > 0) {
10873 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10874 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10875 be16_to_cpu(new_bc->vl[i].shared) > 0)
10876 vl_count++;
10877 ppd->actual_vls_operational = vl_count;
10878 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10879 ppd->actual_vls_operational :
10880 ppd->vls_operational,
10881 NULL);
10882 if (ret == 0)
10883 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10884 ppd->actual_vls_operational :
10885 ppd->vls_operational, NULL);
10886 if (ret)
10887 return ret;
10888 }
10889	return 0;
10890}
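/*
 * A minimal sketch of the ordering set_buffer_control() follows for a
 * hypothetical change that lowers VL0's dedicated limit and raises its
 * shared limit: VL0's shared limit is first forced to 0 and
 * Return_Credit_Status[VL0] is polled until it clears, the dedicated
 * limit is then lowered and the status polled again, and only after
 * that are the new (higher) shared and global shared limits written.
 */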
10891
10892/*
10893 * Read the given fabric manager table. Return the size of the
10894 * table (in bytes) on success, and a negative error code on
10895 * failure.
10896 */
10897int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10898
10899{
10900 int size;
10901 struct vl_arb_cache *vlc;
10902
10903 switch (which) {
10904 case FM_TBL_VL_HIGH_ARB:
10905 size = 256;
10906 /*
10907 * OPA specifies 128 elements (of 2 bytes each), though
10908 * HFI supports only 16 elements in h/w.
10909 */
10910 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10911 vl_arb_get_cache(vlc, t);
10912 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10913 break;
10914 case FM_TBL_VL_LOW_ARB:
10915 size = 256;
10916 /*
10917 * OPA specifies 128 elements (of 2 bytes each), though
10918 * HFI supports only 16 elements in h/w.
10919 */
10920 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10921 vl_arb_get_cache(vlc, t);
10922 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10923 break;
10924 case FM_TBL_BUFFER_CONTROL:
10925 size = get_buffer_control(ppd->dd, t, NULL);
10926 break;
10927 case FM_TBL_SC2VLNT:
10928 size = get_sc2vlnt(ppd->dd, t);
10929 break;
10930 case FM_TBL_VL_PREEMPT_ELEMS:
10931 size = 256;
10932 /* OPA specifies 128 elements, of 2 bytes each */
10933 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10934 break;
10935 case FM_TBL_VL_PREEMPT_MATRIX:
10936 size = 256;
10937 /*
10938 * OPA specifies that this is the same size as the VL
10939 * arbitration tables (i.e., 256 bytes).
10940 */
10941 break;
10942 default:
10943 return -EINVAL;
10944 }
10945 return size;
10946}
10947
10948/*
10949 * Write the given fabric manager table.
10950 */
10951int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10952{
10953 int ret = 0;
10954 struct vl_arb_cache *vlc;
10955
10956 switch (which) {
10957 case FM_TBL_VL_HIGH_ARB:
10958 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10959 if (vl_arb_match_cache(vlc, t)) {
10960 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10961 break;
10962 }
10963 vl_arb_set_cache(vlc, t);
10964 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10965 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10966 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10967 break;
10968 case FM_TBL_VL_LOW_ARB:
10969 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10970 if (vl_arb_match_cache(vlc, t)) {
10971 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10972 break;
10973 }
10974 vl_arb_set_cache(vlc, t);
10975 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10976 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10977 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10978 break;
10979 case FM_TBL_BUFFER_CONTROL:
10980		ret = set_buffer_control(ppd, t);
10981		break;
10982 case FM_TBL_SC2VLNT:
10983 set_sc2vlnt(ppd->dd, t);
10984 break;
10985 default:
10986 ret = -EINVAL;
10987 }
10988 return ret;
10989}
10990
10991/*
10992 * Disable all data VLs.
10993 *
10994 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10995 */
10996static int disable_data_vls(struct hfi1_devdata *dd)
10997{
10998	if (is_ax(dd))
10999		return 1;
11000
11001 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11002
11003 return 0;
11004}
11005
11006/*
11007 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11008 * Just re-enables all data VLs (the "fill" part happens
11009 * automatically - the name was chosen for symmetry with
11010 * stop_drain_data_vls()).
11011 *
11012 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11013 */
11014int open_fill_data_vls(struct hfi1_devdata *dd)
11015{
11016	if (is_ax(dd))
11017		return 1;
11018
11019 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11020
11021 return 0;
11022}
11023
11024/*
11025 * drain_data_vls() - assumes that disable_data_vls() has been called,
11026 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11027 * engines to drop to 0.
11028 */
11029static void drain_data_vls(struct hfi1_devdata *dd)
11030{
11031 sc_wait(dd);
11032 sdma_wait(dd);
11033 pause_for_credit_return(dd);
11034}
11035
11036/*
11037 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11038 *
11039 * Use open_fill_data_vls() to resume using data VLs. This pair is
11040 * meant to be used like this:
11041 *
11042 * stop_drain_data_vls(dd);
11043 * // do things with per-VL resources
11044 * open_fill_data_vls(dd);
11045 */
11046int stop_drain_data_vls(struct hfi1_devdata *dd)
11047{
11048 int ret;
11049
11050 ret = disable_data_vls(dd);
11051 if (ret == 0)
11052 drain_data_vls(dd);
11053
11054 return ret;
11055}
11056
11057/*
11058 * Convert a nanosecond time to a cclock count. No matter how slow
11059 * the cclock, a non-zero ns will always have a non-zero result.
11060 */
11061u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11062{
11063 u32 cclocks;
11064
11065 if (dd->icode == ICODE_FPGA_EMULATION)
11066 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11067 else /* simulation pretends to be ASIC */
11068 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11069 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11070 cclocks = 1;
11071 return cclocks;
11072}
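/*
 * Illustrative conversion (the cclock period constants are defined
 * elsewhere; the numbers here are examples only): with a cclock
 * period of 1242 ps, ns_to_cclock(dd, 500) yields
 * (500 * 1000) / 1242 = 402 cclocks, and a request of 1 ns still
 * yields 1 cclock because of the non-zero clamp above.
 */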
11073
11074/*
11075 * Convert a cclock count to nanoseconds. No matter how slow
11076 * the cclock, a non-zero cclocks count will always have a non-zero result.
11077 */
11078u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11079{
11080 u32 ns;
11081
11082 if (dd->icode == ICODE_FPGA_EMULATION)
11083 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11084 else /* simulation pretends to be ASIC */
11085 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11086 if (cclocks && !ns)
11087 ns = 1;
11088 return ns;
11089}
11090
11091/*
11092 * Dynamically adjust the receive interrupt timeout for a context based on
11093 * incoming packet rate.
11094 *
11095 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11096 */
11097static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11098{
11099 struct hfi1_devdata *dd = rcd->dd;
11100 u32 timeout = rcd->rcvavail_timeout;
11101
11102 /*
11103 * This algorithm doubles or halves the timeout depending on whether
11104 * the number of packets received in this interrupt was less than or
11105 * greater than or equal to the interrupt count.
11106 *
11107 * The calculations below do not allow a steady state to be achieved.
11108 * Only at the endpoints is it possible to have an unchanging
11109 * timeout.
11110 */
11111 if (npkts < rcv_intr_count) {
11112 /*
11113 * Not enough packets arrived before the timeout, adjust
11114 * timeout downward.
11115 */
11116 if (timeout < 2) /* already at minimum? */
11117 return;
11118 timeout >>= 1;
11119 } else {
11120 /*
11121 * More than enough packets arrived before the timeout, adjust
11122 * timeout upward.
11123 */
11124 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11125 return;
11126 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11127 }
11128
11129 rcd->rcvavail_timeout = timeout;
11130 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
11131 been verified to be in range */
11132 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11133 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11134}
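/*
 * Example of the adjustment above (illustrative numbers): with
 * rcv_intr_count = 16, an interrupt that covered only 3 packets
 * halves rcvavail_timeout, while one that covered 40 packets doubles
 * it; the value is capped at dd->rcv_intr_timeout_csr and never drops
 * below 1.
 */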
11135
11136void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11137 u32 intr_adjust, u32 npkts)
11138{
11139 struct hfi1_devdata *dd = rcd->dd;
11140 u64 reg;
11141 u32 ctxt = rcd->ctxt;
11142
11143 /*
11144 * Need to write timeout register before updating RcvHdrHead to ensure
11145 * that a new value is used when the HW decides to restart counting.
11146 */
11147 if (intr_adjust)
11148 adjust_rcv_timeout(rcd, npkts);
11149 if (updegr) {
11150 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11151 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11152 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11153 }
11154 mmiowb();
11155 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11156 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11157 << RCV_HDR_HEAD_HEAD_SHIFT);
11158 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11159 mmiowb();
11160}
11161
11162u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11163{
11164 u32 head, tail;
11165
11166 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11167 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11168
11169 if (rcd->rcvhdrtail_kvaddr)
11170 tail = get_rcvhdrtail(rcd);
11171 else
11172 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11173
11174 return head == tail;
11175}
11176
11177/*
11178 * Context Control and Receive Array encoding for buffer size:
11179 * 0x0 invalid
11180 * 0x1 4 KB
11181 * 0x2 8 KB
11182 * 0x3 16 KB
11183 * 0x4 32 KB
11184 * 0x5 64 KB
11185 * 0x6 128 KB
11186 * 0x7 256 KB
11187 * 0x8 512 KB (Receive Array only)
11188 * 0x9 1 MB (Receive Array only)
11189 * 0xa 2 MB (Receive Array only)
11190 *
11191 * 0xB-0xF - reserved (Receive Array only)
11192 *
11193 *
11194 * This routine assumes that the value has already been sanity checked.
11195 */
11196static u32 encoded_size(u32 size)
11197{
11198 switch (size) {
11199	case 4 * 1024: return 0x1;
11200 case 8 * 1024: return 0x2;
11201 case 16 * 1024: return 0x3;
11202 case 32 * 1024: return 0x4;
11203 case 64 * 1024: return 0x5;
11204 case 128 * 1024: return 0x6;
11205 case 256 * 1024: return 0x7;
11206 case 512 * 1024: return 0x8;
11207 case 1 * 1024 * 1024: return 0x9;
11208 case 2 * 1024 * 1024: return 0xa;
11209	}
11210 return 0x1; /* if invalid, go with the minimum size */
11211}
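/*
 * Usage note (hypothetical values): encoded_size(64 * 1024) returns
 * 0x5 per the table above, while an unlisted size such as 3000 falls
 * through to the minimum encoding 0x1.
 */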
11212
11213void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11214{
11215 struct hfi1_ctxtdata *rcd;
11216 u64 rcvctrl, reg;
11217 int did_enable = 0;
11218
11219 rcd = dd->rcd[ctxt];
11220 if (!rcd)
11221 return;
11222
11223 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11224
11225 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11226 /* if the context already enabled, don't do the extra steps */
11227	if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11228	    !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11229		/* reset the tail and hdr addresses, and sequence count */
11230 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11231 rcd->rcvhdrq_phys);
11232 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11233 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11234 rcd->rcvhdrqtailaddr_phys);
11235 rcd->seq_cnt = 1;
11236
11237 /* reset the cached receive header queue head value */
11238 rcd->head = 0;
11239
11240 /*
11241 * Zero the receive header queue so we don't get false
11242 * positives when checking the sequence number. The
11243 * sequence numbers could land exactly on the same spot.
11244 * E.g. a rcd restart before the receive header wrapped.
11245 */
11246 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11247
11248 /* starting timeout */
11249 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11250
11251 /* enable the context */
11252 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11253
11254 /* clean the egr buffer size first */
11255 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11256 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11257 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11258 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11259
11260 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11261 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11262 did_enable = 1;
11263
11264 /* zero RcvEgrIndexHead */
11265 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11266
11267 /* set eager count and base index */
11268 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11269 & RCV_EGR_CTRL_EGR_CNT_MASK)
11270 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11271 (((rcd->eager_base >> RCV_SHIFT)
11272 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11273 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11274 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11275
11276 /*
11277 * Set TID (expected) count and base index.
11278 * rcd->expected_count is set to individual RcvArray entries,
11279 * not pairs, and the CSR takes a pair-count in groups of
11280 * four, so divide by 8.
11281 */
11282 reg = (((rcd->expected_count >> RCV_SHIFT)
11283 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11284 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11285 (((rcd->expected_base >> RCV_SHIFT)
11286 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11287 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11288 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11289		if (ctxt == HFI1_CTRL_CTXT)
11290			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11291	}
11292 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11293 write_csr(dd, RCV_VL15, 0);
11294		/*
11295		 * When the receive context is being disabled, turn on tail
11296		 * update with a dummy tail address and then disable the
11297		 * receive context.
11298 */
11299 if (dd->rcvhdrtail_dummy_physaddr) {
11300 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11301 dd->rcvhdrtail_dummy_physaddr);
11302			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11303			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11304 }
11305
11306		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11307 }
11308 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11309 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11310 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11311 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11312 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11313 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11314	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11315 /* See comment on RcvCtxtCtrl.TailUpd above */
11316 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11317 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11318 }
11319	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11320 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11321 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11322 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11323 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11324 /* In one-packet-per-eager mode, the size comes from
11325 * the RcvArray entry. */
11326 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11327 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11328 }
11329 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11330 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11331 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11332 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11333 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11334 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11335 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11336 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11337 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11338 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11339 rcd->rcvctrl = rcvctrl;
11340 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11341 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11342
11343 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
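	/*
	 * If the context still reports blocked right after an enable, the
	 * RcvHdrHead writes below (0x10, then 0) nudge the head pointer so
	 * the hardware can drop the stuck status; the second status read
	 * and message report whether that worked.
	 */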
Jubin Johnd0d236e2016-02-14 20:20:15 -080011344 if (did_enable &&
11345 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011346 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11347 if (reg != 0) {
11348 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11349 ctxt, reg);
11350 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11351 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11352 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11353 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11354 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11355 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11356 ctxt, reg, reg == 0 ? "not" : "still");
11357 }
11358 }
11359
11360 if (did_enable) {
11361 /*
11362 * The interrupt timeout and count must be set after
11363 * the context is enabled to take effect.
11364 */
11365 /* set interrupt timeout */
11366 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11367 (u64)rcd->rcvavail_timeout <<
11368 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11369
11370 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11371 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11372 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11373 }
11374
11375 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11376 /*
11377 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011378 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11379 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011380 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011381 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11382 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011383}
11384
11385u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11386 u64 **cntrp)
11387{
11388 int ret;
11389 u64 val = 0;
11390
11391 if (namep) {
11392 ret = dd->cntrnameslen;
11393 if (pos != 0) {
11394 dd_dev_err(dd, "read_cntrs does not support indexing");
11395 return 0;
11396 }
11397 *namep = dd->cntrnames;
11398 } else {
11399 const struct cntr_entry *entry;
11400 int i, j;
11401
11402 ret = (dd->ndevcntrs) * sizeof(u64);
11403 if (pos != 0) {
11404 dd_dev_err(dd, "read_cntrs does not support indexing");
11405 return 0;
11406 }
11407
11408 /* Get the start of the block of counters */
11409 *cntrp = dd->cntrs;
11410
11411 /*
11412 * Now go and fill in each counter in the block.
11413 */
11414 for (i = 0; i < DEV_CNTR_LAST; i++) {
11415 entry = &dev_cntrs[i];
11416 hfi1_cdbg(CNTR, "reading %s", entry->name);
11417 if (entry->flags & CNTR_DISABLED) {
11418 /* Nothing */
11419 hfi1_cdbg(CNTR, "\tDisabled\n");
11420 } else {
11421 if (entry->flags & CNTR_VL) {
11422 hfi1_cdbg(CNTR, "\tPer VL\n");
11423 for (j = 0; j < C_VL_COUNT; j++) {
11424 val = entry->rw_cntr(entry,
11425 dd, j,
11426 CNTR_MODE_R,
11427 0);
11428 hfi1_cdbg(
11429 CNTR,
11430 "\t\tRead 0x%llx for %d\n",
11431 val, j);
11432 dd->cntrs[entry->offset + j] =
11433 val;
11434 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011435 } else if (entry->flags & CNTR_SDMA) {
11436 hfi1_cdbg(CNTR,
11437 "\t Per SDMA Engine\n");
11438 for (j = 0; j < dd->chip_sdma_engines;
11439 j++) {
11440 val =
11441 entry->rw_cntr(entry, dd, j,
11442 CNTR_MODE_R, 0);
11443 hfi1_cdbg(CNTR,
11444 "\t\tRead 0x%llx for %d\n",
11445 val, j);
11446 dd->cntrs[entry->offset + j] =
11447 val;
11448 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011449 } else {
11450 val = entry->rw_cntr(entry, dd,
11451 CNTR_INVALID_VL,
11452 CNTR_MODE_R, 0);
11453 dd->cntrs[entry->offset] = val;
11454 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11455 }
11456 }
11457 }
11458 }
11459 return ret;
11460}
11461
11462/*
11463 * Used by sysfs to create files for hfi stats to read
11464 */
11465u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11466 char **namep, u64 **cntrp)
11467{
11468 int ret;
11469 u64 val = 0;
11470
11471 if (namep) {
11472 ret = dd->portcntrnameslen;
11473 if (pos != 0) {
11474 dd_dev_err(dd, "index not supported");
11475 return 0;
11476 }
11477 *namep = dd->portcntrnames;
11478 } else {
11479 const struct cntr_entry *entry;
11480 struct hfi1_pportdata *ppd;
11481 int i, j;
11482
11483 ret = (dd->nportcntrs) * sizeof(u64);
11484 if (pos != 0) {
11485 dd_dev_err(dd, "indexing not supported");
11486 return 0;
11487 }
11488 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11489 *cntrp = ppd->cntrs;
11490
11491 for (i = 0; i < PORT_CNTR_LAST; i++) {
11492 entry = &port_cntrs[i];
11493 hfi1_cdbg(CNTR, "reading %s", entry->name);
11494 if (entry->flags & CNTR_DISABLED) {
11495 /* Nothing */
11496 hfi1_cdbg(CNTR, "\tDisabled\n");
11497 continue;
11498 }
11499
11500 if (entry->flags & CNTR_VL) {
11501 hfi1_cdbg(CNTR, "\tPer VL");
11502 for (j = 0; j < C_VL_COUNT; j++) {
11503 val = entry->rw_cntr(entry, ppd, j,
11504 CNTR_MODE_R,
11505 0);
11506 hfi1_cdbg(
11507 CNTR,
11508 "\t\tRead 0x%llx for %d",
11509 val, j);
11510 ppd->cntrs[entry->offset + j] = val;
11511 }
11512 } else {
11513 val = entry->rw_cntr(entry, ppd,
11514 CNTR_INVALID_VL,
11515 CNTR_MODE_R,
11516 0);
11517 ppd->cntrs[entry->offset] = val;
11518 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11519 }
11520 }
11521 }
11522 return ret;
11523}
11524
11525static void free_cntrs(struct hfi1_devdata *dd)
11526{
11527 struct hfi1_pportdata *ppd;
11528 int i;
11529
11530 if (dd->synth_stats_timer.data)
11531 del_timer_sync(&dd->synth_stats_timer);
11532 dd->synth_stats_timer.data = 0;
11533 ppd = (struct hfi1_pportdata *)(dd + 1);
11534 for (i = 0; i < dd->num_pports; i++, ppd++) {
11535 kfree(ppd->cntrs);
11536 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011537 free_percpu(ppd->ibport_data.rvp.rc_acks);
11538 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11539 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011540 ppd->cntrs = NULL;
11541 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011542 ppd->ibport_data.rvp.rc_acks = NULL;
11543 ppd->ibport_data.rvp.rc_qacks = NULL;
11544 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011545 }
11546 kfree(dd->portcntrnames);
11547 dd->portcntrnames = NULL;
11548 kfree(dd->cntrs);
11549 dd->cntrs = NULL;
11550 kfree(dd->scntrs);
11551 dd->scntrs = NULL;
11552 kfree(dd->cntrnames);
11553 dd->cntrnames = NULL;
11554}
11555
11556#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11557#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11558
11559static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11560 u64 *psval, void *context, int vl)
11561{
11562 u64 val;
11563 u64 sval = *psval;
11564
11565 if (entry->flags & CNTR_DISABLED) {
11566 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11567 return 0;
11568 }
11569
11570 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11571
11572 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11573
11574 /* If it's a synthetic counter, there is more work we need to do */
11575 if (entry->flags & CNTR_SYNTH) {
11576 if (sval == CNTR_MAX) {
11577 /* No need to read already saturated */
11578 return CNTR_MAX;
11579 }
11580
11581 if (entry->flags & CNTR_32BIT) {
11582 /* 32bit counters can wrap multiple times */
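			/*
			 * The saved software value keeps the wrap count in
			 * its upper 32 bits and the last hardware reading in
			 * its lower 32 bits; a new reading below the saved
			 * lower half means the hardware wrapped, so bump the
			 * wrap count (or saturate at CNTR_MAX once the wrap
			 * count itself is exhausted).
			 */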
11583 u64 upper = sval >> 32;
11584 u64 lower = (sval << 32) >> 32;
11585
11586 if (lower > val) { /* hw wrapped */
11587 if (upper == CNTR_32BIT_MAX)
11588 val = CNTR_MAX;
11589 else
11590 upper++;
11591 }
11592
11593 if (val != CNTR_MAX)
11594 val = (upper << 32) | val;
11595
11596 } else {
11597 /* If we rolled we are saturated */
11598 if ((val < sval) || (val > CNTR_MAX))
11599 val = CNTR_MAX;
11600 }
11601 }
11602
11603 *psval = val;
11604
11605 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11606
11607 return val;
11608}
11609
11610static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11611 struct cntr_entry *entry,
11612 u64 *psval, void *context, int vl, u64 data)
11613{
11614 u64 val;
11615
11616 if (entry->flags & CNTR_DISABLED) {
11617 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11618 return 0;
11619 }
11620
11621 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11622
11623 if (entry->flags & CNTR_SYNTH) {
11624 *psval = data;
11625 if (entry->flags & CNTR_32BIT) {
11626 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11627 (data << 32) >> 32);
11628 val = data; /* return the full 64bit value */
11629 } else {
11630 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11631 data);
11632 }
11633 } else {
11634 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11635 }
11636
11637 *psval = val;
11638
11639 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11640
11641 return val;
11642}
11643
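/*
 * The device/port counter accessors below index into the software shadow
 * (scntrs) at the counter's offset; per-VL counters add the VL index so
 * each VL keeps its own shadow slot for wrap/saturation tracking.
 */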
11644u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11645{
11646 struct cntr_entry *entry;
11647 u64 *sval;
11648
11649 entry = &dev_cntrs[index];
11650 sval = dd->scntrs + entry->offset;
11651
11652 if (vl != CNTR_INVALID_VL)
11653 sval += vl;
11654
11655 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11656}
11657
11658u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11659{
11660 struct cntr_entry *entry;
11661 u64 *sval;
11662
11663 entry = &dev_cntrs[index];
11664 sval = dd->scntrs + entry->offset;
11665
11666 if (vl != CNTR_INVALID_VL)
11667 sval += vl;
11668
11669 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11670}
11671
11672u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11673{
11674 struct cntr_entry *entry;
11675 u64 *sval;
11676
11677 entry = &port_cntrs[index];
11678 sval = ppd->scntrs + entry->offset;
11679
11680 if (vl != CNTR_INVALID_VL)
11681 sval += vl;
11682
11683 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11684 (index <= C_RCV_HDR_OVF_LAST)) {
11685 /* We do not want to bother for disabled contexts */
11686 return 0;
11687 }
11688
11689 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11690}
11691
11692u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11693{
11694 struct cntr_entry *entry;
11695 u64 *sval;
11696
11697 entry = &port_cntrs[index];
11698 sval = ppd->scntrs + entry->offset;
11699
11700 if (vl != CNTR_INVALID_VL)
11701 sval += vl;
11702
11703 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11704 (index <= C_RCV_HDR_OVF_LAST)) {
11705 /* We do not want to bother for disabled contexts */
11706 return 0;
11707 }
11708
11709 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11710}
11711
11712static void update_synth_timer(unsigned long opaque)
11713{
11714 u64 cur_tx;
11715 u64 cur_rx;
11716 u64 total_flits;
11717 u8 update = 0;
11718 int i, j, vl;
11719 struct hfi1_pportdata *ppd;
11720 struct cntr_entry *entry;
11721
11722 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11723
11724 /*
11725 * Rather than keep beating on the CSRs, pick a minimal set that we can
11726 * check to watch for potential rollover. We do this by looking at
11727 * the number of flits sent/received. If the total flits exceeds 32 bits,
11728 * we have to iterate over all the counters and update.
11729 */
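	/* The DC receive and transmit flit counters read below are that tripwire pair. */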
11730 entry = &dev_cntrs[C_DC_RCV_FLITS];
11731 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11732
11733 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11734 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11735
11736 hfi1_cdbg(
11737 CNTR,
11738 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11739 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11740
11741 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11742 /*
11743 * May not be strictly necessary to update but it won't hurt and
11744 * simplifies the logic here.
11745 */
11746 update = 1;
11747 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11748 dd->unit);
11749 } else {
11750 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11751 hfi1_cdbg(CNTR,
11752 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11753 total_flits, (u64)CNTR_32BIT_MAX);
11754 if (total_flits >= CNTR_32BIT_MAX) {
11755 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11756 dd->unit);
11757 update = 1;
11758 }
11759 }
11760
11761 if (update) {
11762 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11763 for (i = 0; i < DEV_CNTR_LAST; i++) {
11764 entry = &dev_cntrs[i];
11765 if (entry->flags & CNTR_VL) {
11766 for (vl = 0; vl < C_VL_COUNT; vl++)
11767 read_dev_cntr(dd, i, vl);
11768 } else {
11769 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11770 }
11771 }
11772 ppd = (struct hfi1_pportdata *)(dd + 1);
11773 for (i = 0; i < dd->num_pports; i++, ppd++) {
11774 for (j = 0; j < PORT_CNTR_LAST; j++) {
11775 entry = &port_cntrs[j];
11776 if (entry->flags & CNTR_VL) {
11777 for (vl = 0; vl < C_VL_COUNT; vl++)
11778 read_port_cntr(ppd, j, vl);
11779 } else {
11780 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11781 }
11782 }
11783 }
11784
11785 /*
11786 * We want the value in the register. The goal is to keep track
11787 * of the number of "ticks", not the counter value. In other
11788 * words, if the register rolls over we want to notice it and
11789 * force an update.
11790 */
11791 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11792 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11793 CNTR_MODE_R, 0);
11794
11795 entry = &dev_cntrs[C_DC_RCV_FLITS];
11796 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11797 CNTR_MODE_R, 0);
11798
11799 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11800 dd->unit, dd->last_tx, dd->last_rx);
11801
11802 } else {
11803 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11804 }
11805
11806 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11807}
11808
11809 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11810static int init_cntrs(struct hfi1_devdata *dd)
11811{
Dean Luickc024c552016-01-11 18:30:57 -050011812 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011813 size_t sz;
11814 char *p;
11815 char name[C_MAX_NAME];
11816 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011817 const char *bit_type_32 = ",32";
11818 const int bit_type_32_sz = strlen(bit_type_32);
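	/*
	 * The exported name buffers built below are newline-separated
	 * entries; 32-bit counters get a ",32" suffix so readers can tell
	 * their width, and per-VL or per-SDMA counters expand to one entry
	 * per VL or engine.
	 */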
Mike Marciniszyn77241052015-07-30 15:17:43 -040011819
11820 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011821 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11822 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011823
11824 /***********************/
11825 /* per device counters */
11826 /***********************/
11827
11828 /* size names and determine how many we have */
11829 dd->ndevcntrs = 0;
11830 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011831
11832 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011833 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11834 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11835 continue;
11836 }
11837
11838 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050011839 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011840 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011841 snprintf(name, C_MAX_NAME, "%s%d",
11842 dev_cntrs[i].name,
11843 vl_from_idx(j));
11844 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011845 /* Add ",32" for 32-bit counters */
11846 if (dev_cntrs[i].flags & CNTR_32BIT)
11847 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011848 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011849 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011850 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011851 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050011852 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011853 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011854 snprintf(name, C_MAX_NAME, "%s%d",
11855 dev_cntrs[i].name, j);
11856 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011857 /* Add ",32" for 32-bit counters */
11858 if (dev_cntrs[i].flags & CNTR_32BIT)
11859 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011860 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011861 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011862 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011863 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011864 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011865 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011866 /* Add ",32" for 32-bit counters */
11867 if (dev_cntrs[i].flags & CNTR_32BIT)
11868 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011869 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011870 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011871 }
11872 }
11873
11874 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011875 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011876 if (!dd->cntrs)
11877 goto bail;
11878
Dean Luickc024c552016-01-11 18:30:57 -050011879 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011880 if (!dd->scntrs)
11881 goto bail;
11882
Mike Marciniszyn77241052015-07-30 15:17:43 -040011883 /* allocate space for the counter names */
11884 dd->cntrnameslen = sz;
11885 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11886 if (!dd->cntrnames)
11887 goto bail;
11888
11889 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011890 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011891 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11892 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011893 } else if (dev_cntrs[i].flags & CNTR_VL) {
11894 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011895 snprintf(name, C_MAX_NAME, "%s%d",
11896 dev_cntrs[i].name,
11897 vl_from_idx(j));
11898 memcpy(p, name, strlen(name));
11899 p += strlen(name);
11900
11901 /* Counter is 32 bits */
11902 if (dev_cntrs[i].flags & CNTR_32BIT) {
11903 memcpy(p, bit_type_32, bit_type_32_sz);
11904 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011905 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011906
Mike Marciniszyn77241052015-07-30 15:17:43 -040011907 *p++ = '\n';
11908 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011909 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11910 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011911 snprintf(name, C_MAX_NAME, "%s%d",
11912 dev_cntrs[i].name, j);
11913 memcpy(p, name, strlen(name));
11914 p += strlen(name);
11915
11916 /* Counter is 32 bits */
11917 if (dev_cntrs[i].flags & CNTR_32BIT) {
11918 memcpy(p, bit_type_32, bit_type_32_sz);
11919 p += bit_type_32_sz;
11920 }
11921
11922 *p++ = '\n';
11923 }
11924 } else {
11925 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11926 p += strlen(dev_cntrs[i].name);
11927
11928 /* Counter is 32 bits */
11929 if (dev_cntrs[i].flags & CNTR_32BIT) {
11930 memcpy(p, bit_type_32, bit_type_32_sz);
11931 p += bit_type_32_sz;
11932 }
11933
11934 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011935 }
11936 }
11937
11938 /*********************/
11939 /* per port counters */
11940 /*********************/
11941
11942 /*
11943 * Go through the counters for the overflows and disable the ones we
11944 * don't need. This varies based on platform so we need to do it
11945 * dynamically here.
11946 */
11947 rcv_ctxts = dd->num_rcv_contexts;
11948 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11949 i <= C_RCV_HDR_OVF_LAST; i++) {
11950 port_cntrs[i].flags |= CNTR_DISABLED;
11951 }
11952
11953 /* size port counter names and determine how many we have */
11954 sz = 0;
11955 dd->nportcntrs = 0;
11956 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011957 if (port_cntrs[i].flags & CNTR_DISABLED) {
11958 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11959 continue;
11960 }
11961
11962 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011963 port_cntrs[i].offset = dd->nportcntrs;
11964 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011965 snprintf(name, C_MAX_NAME, "%s%d",
11966 port_cntrs[i].name,
11967 vl_from_idx(j));
11968 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011969 /* Add ",32" for 32-bit counters */
11970 if (port_cntrs[i].flags & CNTR_32BIT)
11971 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011972 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011973 dd->nportcntrs++;
11974 }
11975 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011976 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011977 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011978 /* Add ",32" for 32-bit counters */
11979 if (port_cntrs[i].flags & CNTR_32BIT)
11980 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011981 port_cntrs[i].offset = dd->nportcntrs;
11982 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011983 }
11984 }
11985
11986 /* allocate space for the counter names */
11987 dd->portcntrnameslen = sz;
11988 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11989 if (!dd->portcntrnames)
11990 goto bail;
11991
11992 /* fill in port cntr names */
11993 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11994 if (port_cntrs[i].flags & CNTR_DISABLED)
11995 continue;
11996
11997 if (port_cntrs[i].flags & CNTR_VL) {
11998 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011999 snprintf(name, C_MAX_NAME, "%s%d",
12000 port_cntrs[i].name,
12001 vl_from_idx(j));
12002 memcpy(p, name, strlen(name));
12003 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012004
12005 /* Counter is 32 bits */
12006 if (port_cntrs[i].flags & CNTR_32BIT) {
12007 memcpy(p, bit_type_32, bit_type_32_sz);
12008 p += bit_type_32_sz;
12009 }
12010
Mike Marciniszyn77241052015-07-30 15:17:43 -040012011 *p++ = '\n';
12012 }
12013 } else {
12014 memcpy(p, port_cntrs[i].name,
12015 strlen(port_cntrs[i].name));
12016 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012017
12018 /* Counter is 32 bits */
12019 if (port_cntrs[i].flags & CNTR_32BIT) {
12020 memcpy(p, bit_type_32, bit_type_32_sz);
12021 p += bit_type_32_sz;
12022 }
12023
Mike Marciniszyn77241052015-07-30 15:17:43 -040012024 *p++ = '\n';
12025 }
12026 }
12027
12028 /* allocate per port storage for counter values */
12029 ppd = (struct hfi1_pportdata *)(dd + 1);
12030 for (i = 0; i < dd->num_pports; i++, ppd++) {
12031 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12032 if (!ppd->cntrs)
12033 goto bail;
12034
12035 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12036 if (!ppd->scntrs)
12037 goto bail;
12038 }
12039
12040 /* CPU counters need to be allocated and zeroed */
12041 if (init_cpu_counters(dd))
12042 goto bail;
12043
12044 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12045 return 0;
12046bail:
12047 free_cntrs(dd);
12048 return -ENOMEM;
12049}
12050
Mike Marciniszyn77241052015-07-30 15:17:43 -040012051static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12052{
12053 switch (chip_lstate) {
12054 default:
12055 dd_dev_err(dd,
12056 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12057 chip_lstate);
12058 /* fall through */
12059 case LSTATE_DOWN:
12060 return IB_PORT_DOWN;
12061 case LSTATE_INIT:
12062 return IB_PORT_INIT;
12063 case LSTATE_ARMED:
12064 return IB_PORT_ARMED;
12065 case LSTATE_ACTIVE:
12066 return IB_PORT_ACTIVE;
12067 }
12068}
12069
12070u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12071{
12072 /* look at the HFI meta-states only */
12073 switch (chip_pstate & 0xf0) {
12074 default:
12075 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12076 chip_pstate);
12077 /* fall through */
12078 case PLS_DISABLED:
12079 return IB_PORTPHYSSTATE_DISABLED;
12080 case PLS_OFFLINE:
12081 return OPA_PORTPHYSSTATE_OFFLINE;
12082 case PLS_POLLING:
12083 return IB_PORTPHYSSTATE_POLLING;
12084 case PLS_CONFIGPHY:
12085 return IB_PORTPHYSSTATE_TRAINING;
12086 case PLS_LINKUP:
12087 return IB_PORTPHYSSTATE_LINKUP;
12088 case PLS_PHYTEST:
12089 return IB_PORTPHYSSTATE_PHY_TEST;
12090 }
12091}
12092
12093/* return the OPA port logical state name */
12094const char *opa_lstate_name(u32 lstate)
12095{
12096 static const char * const port_logical_names[] = {
12097 "PORT_NOP",
12098 "PORT_DOWN",
12099 "PORT_INIT",
12100 "PORT_ARMED",
12101 "PORT_ACTIVE",
12102 "PORT_ACTIVE_DEFER",
12103 };
12104 if (lstate < ARRAY_SIZE(port_logical_names))
12105 return port_logical_names[lstate];
12106 return "unknown";
12107}
12108
12109/* return the OPA port physical state name */
12110const char *opa_pstate_name(u32 pstate)
12111{
12112 static const char * const port_physical_names[] = {
12113 "PHYS_NOP",
12114 "reserved1",
12115 "PHYS_POLL",
12116 "PHYS_DISABLED",
12117 "PHYS_TRAINING",
12118 "PHYS_LINKUP",
12119 "PHYS_LINK_ERR_RECOVER",
12120 "PHYS_PHY_TEST",
12121 "reserved8",
12122 "PHYS_OFFLINE",
12123 "PHYS_GANGED",
12124 "PHYS_TEST",
12125 };
12126 if (pstate < ARRAY_SIZE(port_physical_names))
12127 return port_physical_names[pstate];
12128 return "unknown";
12129}
12130
12131/*
12132 * Read the hardware link state and set the driver's cached value of it.
12133 * Return the (new) current value.
12134 */
12135u32 get_logical_state(struct hfi1_pportdata *ppd)
12136{
12137 u32 new_state;
12138
12139 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12140 if (new_state != ppd->lstate) {
12141 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12142 opa_lstate_name(new_state), new_state);
12143 ppd->lstate = new_state;
12144 }
12145 /*
12146 * Set port status flags in the page mapped into userspace
12147 * memory. Do it here to ensure a reliable state - this is
12148 * the only function called by all state handling code.
12149 * Always set the flags due to the fact that the cache value
12150 * might have been changed explicitly outside of this
12151 * function.
12152 */
12153 if (ppd->statusp) {
12154 switch (ppd->lstate) {
12155 case IB_PORT_DOWN:
12156 case IB_PORT_INIT:
12157 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12158 HFI1_STATUS_IB_READY);
12159 break;
12160 case IB_PORT_ARMED:
12161 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12162 break;
12163 case IB_PORT_ACTIVE:
12164 *ppd->statusp |= HFI1_STATUS_IB_READY;
12165 break;
12166 }
12167 }
12168 return ppd->lstate;
12169}
12170
12171/**
12172 * wait_logical_linkstate - wait for an IB link state change to occur
12173 * @ppd: port device
12174 * @state: the state to wait for
12175 * @msecs: the number of milliseconds to wait
12176 *
12177 * Wait up to msecs milliseconds for IB link state change to occur.
12178 * For now, take the easy polling route.
12179 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12180 */
12181static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12182 int msecs)
12183{
12184 unsigned long timeout;
12185
12186 timeout = jiffies + msecs_to_jiffies(msecs);
12187 while (1) {
12188 if (get_logical_state(ppd) == state)
12189 return 0;
12190 if (time_after(jiffies, timeout))
12191 break;
12192 msleep(20);
12193 }
12194 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12195
12196 return -ETIMEDOUT;
12197}
12198
12199u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12200{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012201 u32 pstate;
12202 u32 ib_pstate;
12203
12204 pstate = read_physical_state(ppd->dd);
12205 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012206 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012207 dd_dev_info(ppd->dd,
12208 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12209 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12210 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012211 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012212 }
12213 return ib_pstate;
12214}
12215
12216/*
12217 * Read/modify/write ASIC_QSFP register bits as selected by mask
12218 * data: 0 or 1 in the positions depending on what needs to be written
12219 * dir: 0 for read, 1 for write
12220 * mask: select by setting
12221 * I2CCLK (bit 0)
12222 * I2CDATA (bit 1)
12223 */
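/*
 * Usage sketch (illustrative only, not from the original source): a call
 * such as hfi1_gpio_mod(dd, 0, 0, 1, 1) sets bit 0 (I2CCLK) of
 * ASIC_QSFP1_OE as an output and returns the current ASIC_QSFP1_IN value;
 * a mask of 0 skips the OE update and just reads the input register.
 */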
12224u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12225 u32 mask)
12226{
12227 u64 qsfp_oe, target_oe;
12228
12229 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12230 if (mask) {
12231 /* We are writing register bits, so lock access */
12232 dir &= mask;
12233 data &= mask;
12234
12235 qsfp_oe = read_csr(dd, target_oe);
12236 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12237 write_csr(dd, target_oe, qsfp_oe);
12238 }
12239 /* We are exclusively reading bits here, but it is unlikely
12240 * we'll get valid data when we set the direction of the pin
12241 * in the same call, so read should call this function again
12242 * to get valid data
12243 */
12244 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12245}
12246
12247#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12248(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12249
12250#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12251(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12252
12253int hfi1_init_ctxt(struct send_context *sc)
12254{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012255 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012256 struct hfi1_devdata *dd = sc->dd;
12257 u64 reg;
12258 u8 set = (sc->type == SC_USER ?
12259 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12260 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12261 reg = read_kctxt_csr(dd, sc->hw_context,
12262 SEND_CTXT_CHECK_ENABLE);
12263 if (set)
12264 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12265 else
12266 SET_STATIC_RATE_CONTROL_SMASK(reg);
12267 write_kctxt_csr(dd, sc->hw_context,
12268 SEND_CTXT_CHECK_ENABLE, reg);
12269 }
12270 return 0;
12271}
12272
12273int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12274{
12275 int ret = 0;
12276 u64 reg;
12277
12278 if (dd->icode != ICODE_RTL_SILICON) {
12279 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12280 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12281 __func__);
12282 return -EINVAL;
12283 }
12284 reg = read_csr(dd, ASIC_STS_THERM);
12285 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12286 ASIC_STS_THERM_CURR_TEMP_MASK);
12287 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12288 ASIC_STS_THERM_LO_TEMP_MASK);
12289 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12290 ASIC_STS_THERM_HI_TEMP_MASK);
12291 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12292 ASIC_STS_THERM_CRIT_TEMP_MASK);
12293 /* triggers is a 3-bit value - 1 bit per trigger. */
12294 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12295
12296 return ret;
12297}
12298
12299/* ========================================================================= */
12300
12301/*
12302 * Enable/disable chip from delivering interrupts.
12303 */
12304void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12305{
12306 int i;
12307
12308 /*
12309 * In HFI, the mask needs to be 1 to allow interrupts.
12310 */
12311 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012312 /* enable all interrupts */
12313 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012314 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012315
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012316 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012317 } else {
12318 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012319 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012320 }
12321}
12322
12323/*
12324 * Clear all interrupt sources on the chip.
12325 */
12326static void clear_all_interrupts(struct hfi1_devdata *dd)
12327{
12328 int i;
12329
12330 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012331 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012332
12333 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12334 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12335 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12336 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12337 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12338 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12339 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12340 for (i = 0; i < dd->chip_send_contexts; i++)
12341 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12342 for (i = 0; i < dd->chip_sdma_engines; i++)
12343 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12344
12345 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12346 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12347 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12348}
12349
12350/* Move to pcie.c? */
12351static void disable_intx(struct pci_dev *pdev)
12352{
12353 pci_intx(pdev, 0);
12354}
12355
12356static void clean_up_interrupts(struct hfi1_devdata *dd)
12357{
12358 int i;
12359
12360 /* remove irqs - must happen before disabling/turning off */
12361 if (dd->num_msix_entries) {
12362 /* MSI-X */
12363 struct hfi1_msix_entry *me = dd->msix_entries;
12364
12365 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012366 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012367 continue;
12368 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012369 free_irq(me->msix.vector, me->arg);
12370 }
12371 } else {
12372 /* INTx */
12373 if (dd->requested_intx_irq) {
12374 free_irq(dd->pcidev->irq, dd);
12375 dd->requested_intx_irq = 0;
12376 }
12377 }
12378
12379 /* turn off interrupts */
12380 if (dd->num_msix_entries) {
12381 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012382 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012383 } else {
12384 /* INTx */
12385 disable_intx(dd->pcidev);
12386 }
12387
12388 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012389 kfree(dd->msix_entries);
12390 dd->msix_entries = NULL;
12391 dd->num_msix_entries = 0;
12392}
12393
12394/*
12395 * Remap the interrupt source from the general handler to the given MSI-X
12396 * interrupt.
12397 */
12398static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12399{
12400 u64 reg;
12401 int m, n;
12402
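	/*
	 * Worked example (hypothetical source number): isrc 72 clears bit 8
	 * of gi_mask[1] (72 = 64 + 8) and is steered through byte 0 of the
	 * tenth CCE_INT_MAP register (72 = 9 * 8 + 0).
	 */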
12403 /* clear from the handled mask of the general interrupt */
12404 m = isrc / 64;
12405 n = isrc % 64;
12406 dd->gi_mask[m] &= ~((u64)1 << n);
12407
12408 /* direct the chip source to the given MSI-X interrupt */
12409 m = isrc / 8;
12410 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012411 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12412 reg &= ~((u64)0xff << (8 * n));
12413 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12414 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012415}
12416
12417static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12418 int engine, int msix_intr)
12419{
12420 /*
12421 * SDMA engine interrupt sources grouped by type, rather than
12422 * engine. Per-engine interrupts are as follows:
12423 * SDMA
12424 * SDMAProgress
12425 * SDMAIdle
12426 */
Jubin John8638b772016-02-14 20:19:24 -080012427 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012428 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012429 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012430 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012431 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Mike Marciniszyn77241052015-07-30 15:17:43 -040012432 msix_intr);
12433}
12434
Mike Marciniszyn77241052015-07-30 15:17:43 -040012435static int request_intx_irq(struct hfi1_devdata *dd)
12436{
12437 int ret;
12438
Jubin John98050712015-11-16 21:59:27 -050012439 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12440 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012441 ret = request_irq(dd->pcidev->irq, general_interrupt,
12442 IRQF_SHARED, dd->intx_name, dd);
12443 if (ret)
12444 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12445 ret);
12446 else
12447 dd->requested_intx_irq = 1;
12448 return ret;
12449}
12450
12451static int request_msix_irqs(struct hfi1_devdata *dd)
12452{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012453 int first_general, last_general;
12454 int first_sdma, last_sdma;
12455 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012456 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012457
12458 /* calculate the ranges we are going to use */
12459 first_general = 0;
12460 first_sdma = last_general = first_general + 1;
12461 first_rx = last_sdma = first_sdma + dd->num_sdma;
12462 last_rx = first_rx + dd->n_krcv_queues;
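	/*
	 * Resulting vector layout: entry 0 is the general interrupt, the
	 * next num_sdma entries are the SDMA engines, and the rest are the
	 * kernel receive contexts.
	 */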
12463
12464 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012465 * Sanity check - the code expects all SDMA chip source
12466 * interrupts to be in the same CSR, starting at bit 0. Verify
12467 * that this is true by checking the bit location of the start.
12468 */
12469 BUILD_BUG_ON(IS_SDMA_START % 64);
12470
12471 for (i = 0; i < dd->num_msix_entries; i++) {
12472 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12473 const char *err_info;
12474 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012475 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012476 void *arg;
12477 int idx;
12478 struct hfi1_ctxtdata *rcd = NULL;
12479 struct sdma_engine *sde = NULL;
12480
12481 /* obtain the arguments to request_irq */
12482 if (first_general <= i && i < last_general) {
12483 idx = i - first_general;
12484 handler = general_interrupt;
12485 arg = dd;
12486 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012487 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012488 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012489 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012490 } else if (first_sdma <= i && i < last_sdma) {
12491 idx = i - first_sdma;
12492 sde = &dd->per_sdma[idx];
12493 handler = sdma_interrupt;
12494 arg = sde;
12495 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012496 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012497 err_info = "sdma";
12498 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012499 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012500 } else if (first_rx <= i && i < last_rx) {
12501 idx = i - first_rx;
12502 rcd = dd->rcd[idx];
12503 /* no interrupt if no rcd */
12504 if (!rcd)
12505 continue;
12506 /*
12507 * Set the interrupt register and mask for this
12508 * context's interrupt.
12509 */
Jubin John8638b772016-02-14 20:19:24 -080012510 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012511 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012512 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012513 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012514 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012515 arg = rcd;
12516 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012517 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012518 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012519 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012520 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012521 } else {
12522 /* not in our expected range - complain, then
12523 * ignore it */
12524 dd_dev_err(dd,
12525 "Unexpected extra MSI-X interrupt %d\n", i);
12526 continue;
12527 }
12528 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012529 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012530 continue;
12531 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012532 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012533
Dean Luickf4f30031c2015-10-26 10:28:44 -040012534 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12535 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012536 if (ret) {
12537 dd_dev_err(dd,
12538 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12539 err_info, me->msix.vector, idx, ret);
12540 return ret;
12541 }
12542 /*
12543 * assign arg after request_irq call, so it will be
12544 * cleaned up
12545 */
12546 me->arg = arg;
12547
Mitko Haralanov957558c2016-02-03 14:33:40 -080012548 ret = hfi1_get_irq_affinity(dd, me);
12549 if (ret)
12550 dd_dev_err(dd,
12551 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012552 }
12553
Mike Marciniszyn77241052015-07-30 15:17:43 -040012554 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012555}
12556
12557/*
12558 * Set the general handler to accept all interrupts, remap all
12559 * chip interrupts back to MSI-X 0.
12560 */
12561static void reset_interrupts(struct hfi1_devdata *dd)
12562{
12563 int i;
12564
12565 /* all interrupts handled by the general handler */
12566 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12567 dd->gi_mask[i] = ~(u64)0;
12568
12569 /* all chip interrupts map to MSI-X 0 */
12570 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012571 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012572}
12573
12574static int set_up_interrupts(struct hfi1_devdata *dd)
12575{
12576 struct hfi1_msix_entry *entries;
12577 u32 total, request;
12578 int i, ret;
12579 int single_interrupt = 0; /* we expect to have all the interrupts */
12580
12581 /*
12582 * Interrupt count:
12583 * 1 general, "slow path" interrupt (includes the SDMA engines
12584 * slow source, SDMACleanupDone)
12585 * N interrupts - one per used SDMA engine
12586 * M interrupt - one per kernel receive context
12587 */
12588 total = 1 + dd->num_sdma + dd->n_krcv_queues;
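	/*
	 * For example (hypothetical configuration): 16 SDMA engines and 8
	 * kernel receive contexts give total = 1 + 16 + 8 = 25 vectors.
	 */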
12589
12590 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12591 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012592 ret = -ENOMEM;
12593 goto fail;
12594 }
12595 /* 1-1 MSI-X entry assignment */
12596 for (i = 0; i < total; i++)
12597 entries[i].msix.entry = i;
12598
12599 /* ask for MSI-X interrupts */
12600 request = total;
12601 request_msix(dd, &request, entries);
12602
12603 if (request == 0) {
12604 /* using INTx */
12605 /* dd->num_msix_entries already zero */
12606 kfree(entries);
12607 single_interrupt = 1;
12608 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12609 } else {
12610 /* using MSI-X */
12611 dd->num_msix_entries = request;
12612 dd->msix_entries = entries;
12613
12614 if (request != total) {
12615 /* using MSI-X, with reduced interrupts */
12616 dd_dev_err(
12617 dd,
12618 "cannot handle reduced interrupt case, want %u, got %u\n",
12619 total, request);
12620 ret = -EINVAL;
12621 goto fail;
12622 }
12623 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12624 }
12625
12626 /* mask all interrupts */
12627 set_intr_state(dd, 0);
12628 /* clear all pending interrupts */
12629 clear_all_interrupts(dd);
12630
12631 /* reset general handler mask, chip MSI-X mappings */
12632 reset_interrupts(dd);
12633
12634 if (single_interrupt)
12635 ret = request_intx_irq(dd);
12636 else
12637 ret = request_msix_irqs(dd);
12638 if (ret)
12639 goto fail;
12640
12641 return 0;
12642
12643fail:
12644 clean_up_interrupts(dd);
12645 return ret;
12646}
12647
12648/*
12649 * Set up context values in dd. Sets:
12650 *
12651 * num_rcv_contexts - number of contexts being used
12652 * n_krcv_queues - number of kernel contexts
12653 * first_user_ctxt - first non-kernel context in array of contexts
12654 * freectxts - number of free user contexts
12655 * num_send_contexts - number of PIO send contexts being used
12656 */
12657static int set_up_context_variables(struct hfi1_devdata *dd)
12658{
12659 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012660 int total_contexts;
12661 int ret;
12662 unsigned ngroups;
12663
12664 /*
12665 * Kernel contexts: (to be fixed later):
12666 * - min of 2, or 1 context per NUMA node
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012667 * - Context 0 - control context (VL15/multicast/error)
12668 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012669 */
12670 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012671 /*
12672 * Don't count context 0 in n_krcvqs since
12673 * it isn't used for normal verbs traffic.
12674 *
12675 * krcvqs will reflect number of kernel
12676 * receive contexts above 0.
12677 */
12678 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012679 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012680 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012681 num_kernel_contexts =
12682 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12683 /*
12684 * Every kernel receive context needs an ACK send context.
12685 * One send context is allocated for each VL{0-7} and VL15.
12686 */
12687 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12688 dd_dev_err(dd,
12689 "Reducing # kernel rcv contexts to: %d, from %d\n",
12690 (int)(dd->chip_send_contexts - num_vls - 1),
12691 (int)num_kernel_contexts);
12692 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12693 }
12694 /*
12695 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012696 * - default to 1 user context per CPU if num_user_contexts is
12697 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012698 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012699 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012700 num_user_contexts = num_online_cpus();
12701
12702 total_contexts = num_kernel_contexts + num_user_contexts;
12703
12704 /*
12705 * Adjust the counts given a global max.
12706 */
12707 if (total_contexts > dd->chip_rcv_contexts) {
12708 dd_dev_err(dd,
12709 "Reducing # user receive contexts to: %d, from %d\n",
12710 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12711 (int)num_user_contexts);
12712 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12713 /* recalculate */
12714 total_contexts = num_kernel_contexts + num_user_contexts;
12715 }
12716
12717 /* the first N are kernel contexts, the rest are user contexts */
12718 dd->num_rcv_contexts = total_contexts;
12719 dd->n_krcv_queues = num_kernel_contexts;
12720 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012721 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012722 dd->freectxts = num_user_contexts;
12723 dd_dev_info(dd,
12724 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12725 (int)dd->chip_rcv_contexts,
12726 (int)dd->num_rcv_contexts,
12727 (int)dd->n_krcv_queues,
12728 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12729
12730 /*
12731 * Receive array allocation:
12732 * All RcvArray entries are divided into groups of 8. This
12733 * is required by the hardware and will speed up writes to
12734 * consecutive entries by using write-combining of the entire
12735 * cacheline.
12736 *
12737 * The number of groups is evenly divided among all contexts;
12738 * any leftover groups are given to the first N user
12739 * contexts.
12740 */
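	/*
	 * Worked example (hypothetical sizes): an 8192-entry RcvArray in
	 * groups of 8 yields 1024 groups; with 20 receive contexts each
	 * context gets 1024 / 20 = 51 groups and the 4 left over go to the
	 * first user contexts.
	 */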
12741 dd->rcv_entries.group_size = RCV_INCREMENT;
12742 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12743 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12744 dd->rcv_entries.nctxt_extra = ngroups -
12745 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12746 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12747 dd->rcv_entries.ngroups,
12748 dd->rcv_entries.nctxt_extra);
12749 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12750 MAX_EAGER_ENTRIES * 2) {
12751 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12752 dd->rcv_entries.group_size;
12753 dd_dev_info(dd,
12754 "RcvArray group count too high, change to %u\n",
12755 dd->rcv_entries.ngroups);
12756 dd->rcv_entries.nctxt_extra = 0;
12757 }
12758 /*
12759 * PIO send contexts
12760 */
12761 ret = init_sc_pools_and_sizes(dd);
12762 if (ret >= 0) { /* success */
12763 dd->num_send_contexts = ret;
12764 dd_dev_info(
12765 dd,
12766 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12767 dd->chip_send_contexts,
12768 dd->num_send_contexts,
12769 dd->sc_sizes[SC_KERNEL].count,
12770 dd->sc_sizes[SC_ACK].count,
12771 dd->sc_sizes[SC_USER].count);
12772 ret = 0; /* success */
12773 }
12774
12775 return ret;
12776}
12777
12778/*
12779 * Set the device/port partition key table. The MAD code
12780 * will ensure that, at least, the partial management
12781 * partition key is present in the table.
12782 */
12783static void set_partition_keys(struct hfi1_pportdata *ppd)
12784{
12785 struct hfi1_devdata *dd = ppd->dd;
12786 u64 reg = 0;
12787 int i;
12788
12789 dd_dev_info(dd, "Setting partition keys\n");
12790 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12791 reg |= (ppd->pkeys[i] &
12792 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12793 ((i % 4) *
12794 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12795 /* Each register holds 4 PKey values. */
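		/*
		 * e.g. pkeys[0..3] are packed into the register at offset 0
		 * and pkeys[4..7] into offset 8 ((i - 3) * 2 evaluates to 8
		 * when i == 7).
		 */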
12796 if ((i % 4) == 3) {
12797 write_csr(dd, RCV_PARTITION_KEY +
12798 ((i - 3) * 2), reg);
12799 reg = 0;
12800 }
12801 }
12802
12803 /* Always enable HW pkeys check when pkeys table is set */
12804 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12805}
12806
12807/*
12808 * These CSRs and memories are uninitialized on reset and must be
12809 * written before reading to set the ECC/parity bits.
12810 *
12811 * NOTE: All user context CSRs that are not mmaped write-only
12812 * (e.g. the TID flows) must be initialized even if the driver never
12813 * reads them.
12814 */
12815static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12816{
12817 int i, j;
12818
12819 /* CceIntMap */
12820 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012821 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012822
12823 /* SendCtxtCreditReturnAddr */
12824 for (i = 0; i < dd->chip_send_contexts; i++)
12825 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12826
12827 /* PIO Send buffers */
12828 /* SDMA Send buffers */
12829 /* These are not normally read, and (presently) have no method
12830 * to be read, so are not pre-initialized */
12831
12832 /* RcvHdrAddr */
12833 /* RcvHdrTailAddr */
12834 /* RcvTidFlowTable */
12835 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12836 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12837 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12838 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080012839 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012840 }
12841
12842 /* RcvArray */
12843 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080012844 write_csr(dd, RCV_ARRAY + (8 * i),
Mike Marciniszyn77241052015-07-30 15:17:43 -040012845 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12846
12847 /* RcvQPMapTable */
12848 for (i = 0; i < 32; i++)
12849 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12850}
12851
12852/*
12853 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12854 */
12855static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12856 u64 ctrl_bits)
12857{
12858 unsigned long timeout;
12859 u64 reg;
12860
12861 /* is the condition present? */
12862 reg = read_csr(dd, CCE_STATUS);
12863 if ((reg & status_bits) == 0)
12864 return;
12865
12866 /* clear the condition */
12867 write_csr(dd, CCE_CTRL, ctrl_bits);
12868
12869 /* wait for the condition to clear */
12870 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12871 while (1) {
12872 reg = read_csr(dd, CCE_STATUS);
12873 if ((reg & status_bits) == 0)
12874 return;
12875 if (time_after(jiffies, timeout)) {
12876 dd_dev_err(dd,
12877 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12878 status_bits, reg & status_bits);
12879 return;
12880 }
12881 udelay(1);
12882 }
12883}
12884
12885/* set CCE CSRs to chip reset defaults */
12886static void reset_cce_csrs(struct hfi1_devdata *dd)
12887{
12888 int i;
12889
12890 /* CCE_REVISION read-only */
12891 /* CCE_REVISION2 read-only */
12892 /* CCE_CTRL - bits clear automatically */
12893 /* CCE_STATUS read-only, use CceCtrl to clear */
12894 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12895 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12896 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12897 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12898 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12899 /* CCE_ERR_STATUS read-only */
12900 write_csr(dd, CCE_ERR_MASK, 0);
12901 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12902 /* CCE_ERR_FORCE leave alone */
12903 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12904 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12905 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12906 /* CCE_PCIE_CTRL leave alone */
12907 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12908 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12909 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12910 CCE_MSIX_TABLE_UPPER_RESETCSR);
12911 }
12912 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12913 /* CCE_MSIX_PBA read-only */
12914 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12915 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12916 }
12917 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12918 write_csr(dd, CCE_INT_MAP, 0);
12919 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12920 /* CCE_INT_STATUS read-only */
12921 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12922 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12923 /* CCE_INT_FORCE leave alone */
12924 /* CCE_INT_BLOCKED read-only */
12925 }
12926 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12927 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12928}
12929
12930/* set ASIC CSRs to chip reset defaults */
12931static void reset_asic_csrs(struct hfi1_devdata *dd)
12932{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012933 int i;
12934
12935 /*
12936 * If the HFIs are shared between separate nodes or VMs,
12937 * then more will need to be done here. One idea is a module
12938 * parameter that returns early, letting the first power-on or
12939 * a known first load do the reset and blocking all others.
12940 */
12941
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012942 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12943 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012944
12945 if (dd->icode != ICODE_FPGA_EMULATION) {
12946 /* emulation does not have an SBus - leave these alone */
12947 /*
12948 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12949 * Notes:
12950 * o The reset is not zero if aimed at the core. See the
12951 * SBus documentation for details.
12952 * o If the SBus firmware has been updated (e.g. by the BIOS),
12953 * will the reset revert that?
12954 */
12955 /* ASIC_CFG_SBUS_REQUEST leave alone */
12956 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12957 }
12958 /* ASIC_SBUS_RESULT read-only */
12959 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12960 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12961 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12962 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012963
12964 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012965 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012966
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012967 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012968 /* ASIC_STS_THERM read-only */
12969 /* ASIC_CFG_RESET leave alone */
12970
12971 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12972 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12973 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12974 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12975 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12976 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12977 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12978 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12979 for (i = 0; i < 16; i++)
12980 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12981
12982 /* ASIC_GPIO_IN read-only */
12983 write_csr(dd, ASIC_GPIO_OE, 0);
12984 write_csr(dd, ASIC_GPIO_INVERT, 0);
12985 write_csr(dd, ASIC_GPIO_OUT, 0);
12986 write_csr(dd, ASIC_GPIO_MASK, 0);
12987 /* ASIC_GPIO_STATUS read-only */
12988 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12989 /* ASIC_GPIO_FORCE leave alone */
12990
12991 /* ASIC_QSFP1_IN read-only */
12992 write_csr(dd, ASIC_QSFP1_OE, 0);
12993 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12994 write_csr(dd, ASIC_QSFP1_OUT, 0);
12995 write_csr(dd, ASIC_QSFP1_MASK, 0);
12996 /* ASIC_QSFP1_STATUS read-only */
12997 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12998 /* ASIC_QSFP1_FORCE leave alone */
12999
13000 /* ASIC_QSFP2_IN read-only */
13001 write_csr(dd, ASIC_QSFP2_OE, 0);
13002 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13003 write_csr(dd, ASIC_QSFP2_OUT, 0);
13004 write_csr(dd, ASIC_QSFP2_MASK, 0);
13005 /* ASIC_QSFP2_STATUS read-only */
13006 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13007 /* ASIC_QSFP2_FORCE leave alone */
13008
13009 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13010 /* this also writes a NOP command, clearing paging mode */
13011 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13012 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013013}
13014
13015/* set MISC CSRs to chip reset defaults */
13016static void reset_misc_csrs(struct hfi1_devdata *dd)
13017{
13018 int i;
13019
13020 for (i = 0; i < 32; i++) {
13021 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13022 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13023 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13024 }
13025 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13026	   only be written in 128-byte chunks */
13027 /* init RSA engine to clear lingering errors */
13028 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13029 write_csr(dd, MISC_CFG_RSA_MU, 0);
13030 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13031 /* MISC_STS_8051_DIGEST read-only */
13032 /* MISC_STS_SBM_DIGEST read-only */
13033 /* MISC_STS_PCIE_DIGEST read-only */
13034 /* MISC_STS_FAB_DIGEST read-only */
13035 /* MISC_ERR_STATUS read-only */
13036 write_csr(dd, MISC_ERR_MASK, 0);
13037 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13038 /* MISC_ERR_FORCE leave alone */
13039}
13040
13041/* set TXE CSRs to chip reset defaults */
13042static void reset_txe_csrs(struct hfi1_devdata *dd)
13043{
13044 int i;
13045
13046 /*
13047 * TXE Kernel CSRs
13048 */
13049 write_csr(dd, SEND_CTRL, 0);
13050 __cm_reset(dd, 0); /* reset CM internal state */
13051 /* SEND_CONTEXTS read-only */
13052 /* SEND_DMA_ENGINES read-only */
13053 /* SEND_PIO_MEM_SIZE read-only */
13054 /* SEND_DMA_MEM_SIZE read-only */
13055 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13056 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13057 /* SEND_PIO_ERR_STATUS read-only */
13058 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13059 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13060 /* SEND_PIO_ERR_FORCE leave alone */
13061 /* SEND_DMA_ERR_STATUS read-only */
13062 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13063 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13064 /* SEND_DMA_ERR_FORCE leave alone */
13065 /* SEND_EGRESS_ERR_STATUS read-only */
13066 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13067 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13068 /* SEND_EGRESS_ERR_FORCE leave alone */
13069 write_csr(dd, SEND_BTH_QP, 0);
13070 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13071 write_csr(dd, SEND_SC2VLT0, 0);
13072 write_csr(dd, SEND_SC2VLT1, 0);
13073 write_csr(dd, SEND_SC2VLT2, 0);
13074 write_csr(dd, SEND_SC2VLT3, 0);
13075 write_csr(dd, SEND_LEN_CHECK0, 0);
13076 write_csr(dd, SEND_LEN_CHECK1, 0);
13077 /* SEND_ERR_STATUS read-only */
13078 write_csr(dd, SEND_ERR_MASK, 0);
13079 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13080 /* SEND_ERR_FORCE read-only */
13081 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013082 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013083 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013084 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13085 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13086 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013087 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013088 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013089 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013090 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013091 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13092 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13093 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13094 /* SEND_CM_CREDIT_USED_STATUS read-only */
13095 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13096 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13097 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13098 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13099 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13100 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013101 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013102 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13103 /* SEND_CM_CREDIT_USED_VL read-only */
13104 /* SEND_CM_CREDIT_USED_VL15 read-only */
13105 /* SEND_EGRESS_CTXT_STATUS read-only */
13106 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13107 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13108 /* SEND_EGRESS_ERR_INFO read-only */
13109 /* SEND_EGRESS_ERR_SOURCE read-only */
13110
13111 /*
13112 * TXE Per-Context CSRs
13113 */
13114 for (i = 0; i < dd->chip_send_contexts; i++) {
13115 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13116 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13117 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13118 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13119 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13120 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13121 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13122 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13123 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13124 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13125 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13126 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13127 }
13128
13129 /*
13130 * TXE Per-SDMA CSRs
13131 */
13132 for (i = 0; i < dd->chip_sdma_engines; i++) {
13133 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13134 /* SEND_DMA_STATUS read-only */
13135 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13136 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13137 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13138 /* SEND_DMA_HEAD read-only */
13139 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13140 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13141 /* SEND_DMA_IDLE_CNT read-only */
13142 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13143 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13144 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13145 /* SEND_DMA_ENG_ERR_STATUS read-only */
13146 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13147 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13148 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13149 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13150 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13151 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13152 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13153 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13154 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13155 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13156 }
13157}
13158
13159/*
13160 * Expect on entry:
13161 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13162 */
13163static void init_rbufs(struct hfi1_devdata *dd)
13164{
13165 u64 reg;
13166 int count;
13167
13168 /*
13169 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13170 * clear.
13171 */
13172 count = 0;
13173 while (1) {
13174 reg = read_csr(dd, RCV_STATUS);
13175 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13176 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13177 break;
13178 /*
13179 * Give up after 1ms - maximum wait time.
13180 *
13181 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13182 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13183 * 148 KB / (66% * 250MB/s) = 920us
13184 */
13185 if (count++ > 500) {
13186 dd_dev_err(dd,
13187 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13188 __func__, reg);
13189 break;
13190 }
13191 udelay(2); /* do not busy-wait the CSR */
13192 }
13193
13194 /* start the init - expect RcvCtrl to be 0 */
13195 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13196
13197 /*
13198	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13199 * period after the write before RcvStatus.RxRbufInitDone is valid.
13200 * The delay in the first run through the loop below is sufficient and
13201	 * required before the first read of RcvStatus.RxRbufInitDone.
13202 */
13203 read_csr(dd, RCV_CTRL);
13204
13205 /* wait for the init to finish */
13206 count = 0;
13207 while (1) {
13208 /* delay is required first time through - see above */
13209 udelay(2); /* do not busy-wait the CSR */
13210 reg = read_csr(dd, RCV_STATUS);
13211 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13212 break;
13213
13214 /* give up after 100us - slowest possible at 33MHz is 73us */
13215 if (count++ > 50) {
13216 dd_dev_err(dd,
13217				   "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13218 __func__);
13219 break;
13220 }
13221 }
13222}
13223
13224/* set RXE CSRs to chip reset defaults */
13225static void reset_rxe_csrs(struct hfi1_devdata *dd)
13226{
13227 int i, j;
13228
13229 /*
13230 * RXE Kernel CSRs
13231 */
13232 write_csr(dd, RCV_CTRL, 0);
13233 init_rbufs(dd);
13234 /* RCV_STATUS read-only */
13235 /* RCV_CONTEXTS read-only */
13236 /* RCV_ARRAY_CNT read-only */
13237 /* RCV_BUF_SIZE read-only */
13238 write_csr(dd, RCV_BTH_QP, 0);
13239 write_csr(dd, RCV_MULTICAST, 0);
13240 write_csr(dd, RCV_BYPASS, 0);
13241 write_csr(dd, RCV_VL15, 0);
13242 /* this is a clear-down */
13243 write_csr(dd, RCV_ERR_INFO,
13244 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13245 /* RCV_ERR_STATUS read-only */
13246 write_csr(dd, RCV_ERR_MASK, 0);
13247 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13248 /* RCV_ERR_FORCE leave alone */
13249 for (i = 0; i < 32; i++)
13250 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13251 for (i = 0; i < 4; i++)
13252 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13253 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13254 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13255 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13256 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13257 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13258 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13259 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13260 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13261 }
13262 for (i = 0; i < 32; i++)
13263 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13264
13265 /*
13266 * RXE Kernel and User Per-Context CSRs
13267 */
13268 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13269 /* kernel */
13270 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13271 /* RCV_CTXT_STATUS read-only */
13272 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13273 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13274 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13275 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13276 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13277 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13278 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13279 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13280 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13281 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13282
13283 /* user */
13284 /* RCV_HDR_TAIL read-only */
13285 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13286 /* RCV_EGR_INDEX_TAIL read-only */
13287 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13288 /* RCV_EGR_OFFSET_TAIL read-only */
13289 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13290 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13291 0);
13292 }
13293 }
13294}
13295
13296/*
13297 * Set sc2vl tables.
13298 *
13299 * They power on to zeros, so to avoid send context errors
13300 * they need to be set:
13301 *
13302 * SC 0-7 -> VL 0-7 (respectively)
13303 * SC 15 -> VL 15
13304 * otherwise
13305 * -> VL 0
13306 */
13307static void init_sc2vl_tables(struct hfi1_devdata *dd)
13308{
13309 int i;
13310 /* init per architecture spec, constrained by hardware capability */
13311
13312 /* HFI maps sent packets */
13313 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13314 0,
13315 0, 0, 1, 1,
13316 2, 2, 3, 3,
13317 4, 4, 5, 5,
13318 6, 6, 7, 7));
13319 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13320 1,
13321 8, 0, 9, 0,
13322 10, 0, 11, 0,
13323 12, 0, 13, 0,
13324 14, 0, 15, 15));
13325 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13326 2,
13327 16, 0, 17, 0,
13328 18, 0, 19, 0,
13329 20, 0, 21, 0,
13330 22, 0, 23, 0));
13331 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13332 3,
13333 24, 0, 25, 0,
13334 26, 0, 27, 0,
13335 28, 0, 29, 0,
13336 30, 0, 31, 0));
13337
13338 /* DC maps received packets */
13339 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13340 15_0,
13341 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13342 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13343 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13344 31_16,
13345 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13346 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13347
13348 /* initialize the cached sc2vl values consistently with h/w */
13349 for (i = 0; i < 32; i++) {
13350 if (i < 8 || i == 15)
13351 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13352 else
13353 *((u8 *)(dd->sc2vl) + i) = 0;
13354 }
13355}
13356
13357/*
13358 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13359 * depend on the chip going through a power-on reset - a driver may be loaded
13360 * and unloaded many times.
13361 *
13362 * Do not write any CSR values to the chip in this routine - there may be
13363 * a reset following the (possible) FLR in this routine.
13364 *
13365 */
13366static void init_chip(struct hfi1_devdata *dd)
13367{
13368 int i;
13369
13370 /*
13371 * Put the HFI CSRs in a known state.
13372 * Combine this with a DC reset.
13373 *
13374 * Stop the device from doing anything while we do a
13375 * reset. We know there are no other active users of
13376 * the device since we are now in charge. Turn off
13377	 * all outbound and inbound traffic and make sure
13378 * the device does not generate any interrupts.
13379 */
13380
13381 /* disable send contexts and SDMA engines */
13382 write_csr(dd, SEND_CTRL, 0);
13383 for (i = 0; i < dd->chip_send_contexts; i++)
13384 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13385 for (i = 0; i < dd->chip_sdma_engines; i++)
13386 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13387 /* disable port (turn off RXE inbound traffic) and contexts */
13388 write_csr(dd, RCV_CTRL, 0);
13389 for (i = 0; i < dd->chip_rcv_contexts; i++)
13390 write_csr(dd, RCV_CTXT_CTRL, 0);
13391 /* mask all interrupt sources */
13392 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013393 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013394
13395 /*
13396 * DC Reset: do a full DC reset before the register clear.
13397 * A recommended length of time to hold is one CSR read,
13398 * so reread the CceDcCtrl. Then, hold the DC in reset
13399 * across the clear.
13400 */
13401 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013402 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013403
13404 if (use_flr) {
13405 /*
13406		 * An FLR will reset the SPC core and part of the PCIe.
13407 * The parts that need to be restored have already been
13408 * saved.
13409 */
13410 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13411
13412 /* do the FLR, the DC reset will remain */
13413 hfi1_pcie_flr(dd);
13414
13415 /* restore command and BARs */
13416 restore_pci_variables(dd);
13417
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013418 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013419 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13420 hfi1_pcie_flr(dd);
13421 restore_pci_variables(dd);
13422 }
13423
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013424 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013425 } else {
13426 dd_dev_info(dd, "Resetting CSRs with writes\n");
13427 reset_cce_csrs(dd);
13428 reset_txe_csrs(dd);
13429 reset_rxe_csrs(dd);
13430 reset_asic_csrs(dd);
13431 reset_misc_csrs(dd);
13432 }
13433 /* clear the DC reset */
13434 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013435
Mike Marciniszyn77241052015-07-30 15:17:43 -040013436 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013437 setextled(dd, 0);
13438
Mike Marciniszyn77241052015-07-30 15:17:43 -040013439 /*
13440 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013441 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013442 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013443	 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013444 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013445 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013446 * I2CCLK and I2CDAT will change per direction, and INT_N and
13447 * MODPRS_N are input only and their value is ignored.
13448 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013449 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13450 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013451}
13452
13453static void init_early_variables(struct hfi1_devdata *dd)
13454{
13455 int i;
13456
13457 /* assign link credit variables */
13458 dd->vau = CM_VAU;
13459 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013460 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013461 dd->link_credits--;
13462 dd->vcu = cu_to_vcu(hfi1_cu);
13463 /* enough room for 8 MAD packets plus header - 17K */
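	/*
	 * Illustrative arithmetic (the 64-byte AU size used here is only an
	 * assumed example, not a value from this source): 8 * (2048 + 128)
	 * = 17408 bytes; with a 64-byte AU that is 272 AUs, clamped to
	 * link_credits by the check below if it is larger.
	 */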
13464 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13465 if (dd->vl15_init > dd->link_credits)
13466 dd->vl15_init = dd->link_credits;
13467
13468 write_uninitialized_csrs_and_memories(dd);
13469
13470 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13471 for (i = 0; i < dd->num_pports; i++) {
13472 struct hfi1_pportdata *ppd = &dd->pport[i];
13473
13474 set_partition_keys(ppd);
13475 }
13476 init_sc2vl_tables(dd);
13477}
13478
13479static void init_kdeth_qp(struct hfi1_devdata *dd)
13480{
13481 /* user changed the KDETH_QP */
13482 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13483 /* out of range or illegal value */
13484 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13485 kdeth_qp = 0;
13486 }
13487 if (kdeth_qp == 0) /* not set, or failed range check */
13488 kdeth_qp = DEFAULT_KDETH_QP;
13489
13490 write_csr(dd, SEND_BTH_QP,
13491 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13492 << SEND_BTH_QP_KDETH_QP_SHIFT);
13493
13494 write_csr(dd, RCV_BTH_QP,
13495 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13496 << RCV_BTH_QP_KDETH_QP_SHIFT);
13497}
13498
13499/**
13500 * init_qpmap_table
13501 * @dd - device data
13502 * @first_ctxt - first context
13503 * @last_ctxt - last context
13504 *
13505 * This routine sets the qpn mapping table that
13506 * is indexed by qpn[8:1].
13507 *
13508 * The routine will round robin the 256 settings
13509 * from first_ctxt to last_ctxt.
13510 *
13511 * The first/last looks ahead to having specialized
13512 * receive contexts for mgmt and bypass. Normal
13513 * verbs traffic is assumed to be on a range
13514 * of receive contexts.
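 *
 * Illustrative example (arbitrary parameter values): with
 * first_ctxt = 1 and last_ctxt = 3, the 256 one-byte entries are
 * written round robin as 1, 2, 3, 1, 2, 3, ..., packed eight per
 * 64-bit RcvQpMapTable CSR, so each qpn[8:1] value selects one of
 * the three contexts.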
13515 */
13516static void init_qpmap_table(struct hfi1_devdata *dd,
13517 u32 first_ctxt,
13518 u32 last_ctxt)
13519{
13520 u64 reg = 0;
13521 u64 regno = RCV_QP_MAP_TABLE;
13522 int i;
13523 u64 ctxt = first_ctxt;
13524
13525 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013526 reg |= ctxt << (8 * (i % 8));
13527 i++;
13528 ctxt++;
13529 if (ctxt > last_ctxt)
13530 ctxt = first_ctxt;
13531 if (i % 8 == 0) {
13532 write_csr(dd, regno, reg);
13533 reg = 0;
13534 regno += 8;
13535 }
13536 }
13537 if (i % 8)
13538 write_csr(dd, regno, reg);
13539
13540 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13541 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13542}
13543
13544/**
13545 * init_qos - init RX qos
13546 * @dd - device data
13547 * @first_ctxt - first context
13548 *
13549 * This routine initializes Rule 0 and the
13550 * RSM map table to implement qos.
13551 *
13552 * If all of the limit tests succeed,
13553 * qos is applied based on the array
13554 * interpretation of krcvqs where
13555 * entry 0 is VL0.
13556 *
13557 * The number of vl bits (n) and the number of qpn
13558 * bits (m) are computed to feed both the RSM map table
13559 * and the single rule.
13560 *
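 * Illustrative example (arbitrary parameter values): with num_vls = 4
 * and krcvqs = {2, 2, 2, 2}, max_by_vl = 2, so qpns_per_vl = 2,
 * n = ilog2(4) = 2 and m = ilog2(2) = 1. Each map table index is
 * (qpn << n) ^ vl, i.e. 3 bits, and rule 0 is programmed to select
 * n = 2 SC bits and n + m = 3 QPN bits to index the table.
 *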
13561 */
13562static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13563{
13564 u8 max_by_vl = 0;
13565 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13566 u64 *rsmmap;
13567 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013568 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013569
13570 /* validate */
13571 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13572 num_vls == 1 ||
13573 krcvqsset <= 1)
13574 goto bail;
13575 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13576 if (krcvqs[i] > max_by_vl)
13577 max_by_vl = krcvqs[i];
13578 if (max_by_vl > 32)
13579 goto bail;
13580 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13581 /* determine bits vl */
13582 n = ilog2(num_vls);
13583 /* determine bits for qpn */
13584 m = ilog2(qpns_per_vl);
13585 if ((m + n) > 7)
13586 goto bail;
13587 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13588 goto bail;
13589 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013590 if (!rsmmap)
13591 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013592 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13593 /* init the local copy of the table */
13594 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13595 unsigned tctxt;
13596
13597 for (qpn = 0, tctxt = ctxt;
13598 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13599 unsigned idx, regoff, regidx;
13600
13601			/* generate an index < 128 */
13602 idx = (qpn << n) ^ i;
13603 regoff = (idx % 8) * 8;
13604 regidx = idx / 8;
13605 reg = rsmmap[regidx];
13606 /* replace 0xff with context number */
13607 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13608 << regoff);
13609 reg |= (u64)(tctxt++) << regoff;
13610 rsmmap[regidx] = reg;
13611 if (tctxt == ctxt + krcvqs[i])
13612 tctxt = ctxt;
13613 }
13614 ctxt += krcvqs[i];
13615 }
13616 /* flush cached copies to chip */
13617 for (i = 0; i < NUM_MAP_REGS; i++)
13618 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13619 /* add rule0 */
13620 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13621 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13622 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13623 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13624 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13625 LRH_BTH_MATCH_OFFSET
13626 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13627 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13628 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13629 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13630 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13631 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13632 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13633 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13634 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13635 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13636 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13637 /* Enable RSM */
13638 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13639 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013640 /* map everything else to first context */
13641 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013642 dd->qos_shift = n + 1;
13643 return;
13644bail:
13645 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013646 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013647}
13648
13649static void init_rxe(struct hfi1_devdata *dd)
13650{
13651 /* enable all receive errors */
13652 write_csr(dd, RCV_ERR_MASK, ~0ull);
13653 /* setup QPN map table - start where VL15 context leaves off */
13654 init_qos(
13655 dd,
13656 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13657 /*
13658 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13659 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13660 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13661 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13662 * Max_PayLoad_Size set to its minimum of 128.
13663 *
13664 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13665 * (64 bytes). Max_Payload_Size is possibly modified upward in
13666 * tune_pcie_caps() which is called after this routine.
13667 */
13668}
13669
13670static void init_other(struct hfi1_devdata *dd)
13671{
13672 /* enable all CCE errors */
13673 write_csr(dd, CCE_ERR_MASK, ~0ull);
13674 /* enable *some* Misc errors */
13675 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13676 /* enable all DC errors, except LCB */
13677 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13678 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13679}
13680
13681/*
13682 * Fill out the given AU table using the given CU. A CU is defined in terms
13683 * AUs. The table is a an encoding: given the index, how many AUs does that
13684 * represent?
13685 *
13686 * NOTE: Assumes that the register layout is the same for the
13687 * local and remote tables.
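 *
 * For example, with cu = 1 the eight entries encode 0, 1, 2, 4, 8,
 * 16, 32 and 64 AUs; entries 2 through 7 simply scale with the CU,
 * while entries 0 and 1 are fixed.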
13688 */
13689static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13690 u32 csr0to3, u32 csr4to7)
13691{
13692 write_csr(dd, csr0to3,
13693 0ull <<
13694 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13695 | 1ull <<
13696 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13697 | 2ull * cu <<
13698 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13699 | 4ull * cu <<
13700 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13701 write_csr(dd, csr4to7,
13702 8ull * cu <<
13703 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13704 | 16ull * cu <<
13705 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13706 | 32ull * cu <<
13707 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13708 | 64ull * cu <<
13709 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013710}
13711
13712static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13713{
13714 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13715 SEND_CM_LOCAL_AU_TABLE4_TO7);
13716}
13717
13718void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13719{
13720 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13721 SEND_CM_REMOTE_AU_TABLE4_TO7);
13722}
13723
13724static void init_txe(struct hfi1_devdata *dd)
13725{
13726 int i;
13727
13728 /* enable all PIO, SDMA, general, and Egress errors */
13729 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13730 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13731 write_csr(dd, SEND_ERR_MASK, ~0ull);
13732 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13733
13734 /* enable all per-context and per-SDMA engine errors */
13735 for (i = 0; i < dd->chip_send_contexts; i++)
13736 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13737 for (i = 0; i < dd->chip_sdma_engines; i++)
13738 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13739
13740 /* set the local CU to AU mapping */
13741 assign_local_cm_au_table(dd, dd->vcu);
13742
13743 /*
13744 * Set reasonable default for Credit Return Timer
13745 * Don't set on Simulator - causes it to choke.
13746 */
13747 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13748 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13749}
13750
13751int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13752{
13753 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13754 unsigned sctxt;
13755 int ret = 0;
13756 u64 reg;
13757
13758 if (!rcd || !rcd->sc) {
13759 ret = -EINVAL;
13760 goto done;
13761 }
13762 sctxt = rcd->sc->hw_context;
13763 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13764 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13765 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13766 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13767 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13768 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13769 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13770 /*
13771 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013772 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013773 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013774 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13775 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13776 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13777 }
13778
13779 /* Enable J_KEY check on receive context. */
13780 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13781 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13782 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13783 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13784done:
13785 return ret;
13786}
13787
13788int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13789{
13790 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13791 unsigned sctxt;
13792 int ret = 0;
13793 u64 reg;
13794
13795 if (!rcd || !rcd->sc) {
13796 ret = -EINVAL;
13797 goto done;
13798 }
13799 sctxt = rcd->sc->hw_context;
13800 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13801 /*
13802 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13803 * This check would not have been enabled for A0 h/w, see
13804 * set_ctxt_jkey().
13805 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013806 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013807 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13808 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13809 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13810 }
13811 /* Turn off the J_KEY on the receive side */
13812 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13813done:
13814 return ret;
13815}
13816
13817int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13818{
13819 struct hfi1_ctxtdata *rcd;
13820 unsigned sctxt;
13821 int ret = 0;
13822 u64 reg;
13823
13824 if (ctxt < dd->num_rcv_contexts)
13825 rcd = dd->rcd[ctxt];
13826 else {
13827 ret = -EINVAL;
13828 goto done;
13829 }
13830 if (!rcd || !rcd->sc) {
13831 ret = -EINVAL;
13832 goto done;
13833 }
13834 sctxt = rcd->sc->hw_context;
13835 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13836 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13837 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13838 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13839 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13840 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13841done:
13842 return ret;
13843}
13844
13845int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13846{
13847 struct hfi1_ctxtdata *rcd;
13848 unsigned sctxt;
13849 int ret = 0;
13850 u64 reg;
13851
13852 if (ctxt < dd->num_rcv_contexts)
13853 rcd = dd->rcd[ctxt];
13854 else {
13855 ret = -EINVAL;
13856 goto done;
13857 }
13858 if (!rcd || !rcd->sc) {
13859 ret = -EINVAL;
13860 goto done;
13861 }
13862 sctxt = rcd->sc->hw_context;
13863 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13864 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13865 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13866 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13867done:
13868 return ret;
13869}
13870
13871/*
13872 * Start doing the clean up of the chip. Our clean up happens in multiple
13873 * stages and this is just the first.
13874 */
13875void hfi1_start_cleanup(struct hfi1_devdata *dd)
13876{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013877 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013878 free_cntrs(dd);
13879 free_rcverr(dd);
13880 clean_up_interrupts(dd);
13881}
13882
13883#define HFI_BASE_GUID(dev) \
13884 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13885
13886/*
13887 * Certain chip functions need to be initialized only once per asic
13888 * instead of per-device. This function finds the peer device and
13889 * checks whether that chip initialization needs to be done by this
13890 * device.
13891 */
13892static void asic_should_init(struct hfi1_devdata *dd)
13893{
13894 unsigned long flags;
13895 struct hfi1_devdata *tmp, *peer = NULL;
13896
13897 spin_lock_irqsave(&hfi1_devs_lock, flags);
13898 /* Find our peer device */
13899 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13900 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13901 dd->unit != tmp->unit) {
13902 peer = tmp;
13903 break;
13904 }
13905 }
13906
13907 /*
13908 * "Claim" the ASIC for initialization if it hasn't been
13909	 * "claimed" yet.
13910 */
13911 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13912 dd->flags |= HFI1_DO_INIT_ASIC;
13913 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13914}
13915
Dean Luick5d9157a2015-11-16 21:59:34 -050013916/*
13917 * Set dd->boardname. Use a generic name if a name is not returned from
13918 * EFI variable space.
13919 *
13920 * Return 0 on success, -ENOMEM if space could not be allocated.
13921 */
13922static int obtain_boardname(struct hfi1_devdata *dd)
13923{
13924 /* generic board description */
13925 const char generic[] =
13926 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13927 unsigned long size;
13928 int ret;
13929
13930 ret = read_hfi1_efi_var(dd, "description", &size,
13931 (void **)&dd->boardname);
13932 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080013933 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050013934 /* use generic description */
13935 dd->boardname = kstrdup(generic, GFP_KERNEL);
13936 if (!dd->boardname)
13937 return -ENOMEM;
13938 }
13939 return 0;
13940}
13941
Mike Marciniszyn77241052015-07-30 15:17:43 -040013942/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013943 * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013944 * @pdev: the pci_dev for hfi1_ib device
13945 * @ent: pci_device_id struct for this dev
13946 *
13947 * Also allocates, initializes, and returns the devdata struct for this
13948 * device instance
13949 *
13950 * This is global, and is called directly at init to set up the
13951 * chip-specific function pointers for later use.
13952 */
13953struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13954 const struct pci_device_id *ent)
13955{
13956 struct hfi1_devdata *dd;
13957 struct hfi1_pportdata *ppd;
13958 u64 reg;
13959 int i, ret;
13960 static const char * const inames[] = { /* implementation names */
13961 "RTL silicon",
13962 "RTL VCS simulation",
13963 "RTL FPGA emulation",
13964 "Functional simulator"
13965 };
13966
13967 dd = hfi1_alloc_devdata(pdev,
13968 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13969 if (IS_ERR(dd))
13970 goto bail;
13971 ppd = dd->pport;
13972 for (i = 0; i < dd->num_pports; i++, ppd++) {
13973 int vl;
13974 /* init common fields */
13975 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13976 /* DC supports 4 link widths */
13977 ppd->link_width_supported =
13978 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13979 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13980 ppd->link_width_downgrade_supported =
13981 ppd->link_width_supported;
13982 /* start out enabling only 4X */
13983 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13984 ppd->link_width_downgrade_enabled =
13985 ppd->link_width_downgrade_supported;
13986 /* link width active is 0 when link is down */
13987 /* link width downgrade active is 0 when link is down */
13988
Jubin Johnd0d236e2016-02-14 20:20:15 -080013989 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
13990 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013991 hfi1_early_err(&pdev->dev,
13992 "Invalid num_vls %u, using %u VLs\n",
13993 num_vls, HFI1_MAX_VLS_SUPPORTED);
13994 num_vls = HFI1_MAX_VLS_SUPPORTED;
13995 }
13996 ppd->vls_supported = num_vls;
13997 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080013998 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013999 /* Set the default MTU. */
14000 for (vl = 0; vl < num_vls; vl++)
14001 dd->vld[vl].mtu = hfi1_max_mtu;
14002 dd->vld[15].mtu = MAX_MAD_PACKET;
14003 /*
14004		 * Set the initial values to reasonable defaults; they will be set
14005 * for real when link is up.
14006 */
14007 ppd->lstate = IB_PORT_DOWN;
14008 ppd->overrun_threshold = 0x4;
14009 ppd->phy_error_threshold = 0xf;
14010 ppd->port_crc_mode_enabled = link_crc_mask;
14011 /* initialize supported LTP CRC mode */
14012 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14013 /* initialize enabled LTP CRC mode */
14014 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14015 /* start in offline */
14016 ppd->host_link_state = HLS_DN_OFFLINE;
14017 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014018 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014019 }
14020
14021 dd->link_default = HLS_DN_POLL;
14022
14023 /*
14024 * Do remaining PCIe setup and save PCIe values in dd.
14025 * Any error printing is already done by the init code.
14026 * On return, we have the chip mapped.
14027 */
14028 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14029 if (ret < 0)
14030 goto bail_free;
14031
14032 /* verify that reads actually work, save revision for reset check */
14033 dd->revision = read_csr(dd, CCE_REVISION);
14034 if (dd->revision == ~(u64)0) {
14035 dd_dev_err(dd, "cannot read chip CSRs\n");
14036 ret = -EINVAL;
14037 goto bail_cleanup;
14038 }
14039 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14040 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14041 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14042 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14043
14044 /* obtain the hardware ID - NOT related to unit, which is a
14045 software enumeration */
14046 reg = read_csr(dd, CCE_REVISION2);
14047 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14048 & CCE_REVISION2_HFI_ID_MASK;
14049 /* the variable size will remove unwanted bits */
14050 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14051 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14052 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14053 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
14054 (int)dd->irev);
14055
14056 /* speeds the hardware can support */
14057 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14058 /* speeds allowed to run at */
14059 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14060 /* give a reasonable active value, will be set on link up */
14061 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14062
14063 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14064 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14065 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14066 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14067 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14068 /* fix up link widths for emulation _p */
14069 ppd = dd->pport;
14070 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14071 ppd->link_width_supported =
14072 ppd->link_width_enabled =
14073 ppd->link_width_downgrade_supported =
14074 ppd->link_width_downgrade_enabled =
14075 OPA_LINK_WIDTH_1X;
14076 }
14077	/* ensure num_vls isn't larger than the number of sdma engines */
14078 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14079 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014080 num_vls, dd->chip_sdma_engines);
14081 num_vls = dd->chip_sdma_engines;
14082 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014083 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014084 }
14085
14086 /*
14087 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14088 * Limit the max if larger than the field holds. If timeout is
14089 * non-zero, then the calculated field will be at least 1.
14090 *
14091 * Must be after icode is set up - the cclock rate depends
14092 * on knowing the hardware being used.
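	 *
	 * Illustrative arithmetic (the 500 MHz cclock rate here is only an
	 * assumed example): rcv_intr_timeout = 640 ns would convert to 320
	 * cclocks, and dividing by 64 gives a CSR field value of 5.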
14093 */
14094 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14095 if (dd->rcv_intr_timeout_csr >
14096 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14097 dd->rcv_intr_timeout_csr =
14098 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14099 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14100 dd->rcv_intr_timeout_csr = 1;
14101
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014102 /* needs to be done before we look for the peer device */
14103 read_guid(dd);
14104
14105 /* should this device init the ASIC block? */
14106 asic_should_init(dd);
14107
Mike Marciniszyn77241052015-07-30 15:17:43 -040014108 /* obtain chip sizes, reset chip CSRs */
14109 init_chip(dd);
14110
14111 /* read in the PCIe link speed information */
14112 ret = pcie_speeds(dd);
14113 if (ret)
14114 goto bail_cleanup;
14115
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014116 /* Needs to be called before hfi1_firmware_init */
14117 get_platform_config(dd);
14118
Mike Marciniszyn77241052015-07-30 15:17:43 -040014119 /* read in firmware */
14120 ret = hfi1_firmware_init(dd);
14121 if (ret)
14122 goto bail_cleanup;
14123
14124 /*
14125 * In general, the PCIe Gen3 transition must occur after the
14126 * chip has been idled (so it won't initiate any PCIe transactions
14127 * e.g. an interrupt) and before the driver changes any registers
14128 * (the transition will reset the registers).
14129 *
14130 * In particular, place this call after:
14131 * - init_chip() - the chip will not initiate any PCIe transactions
14132 * - pcie_speeds() - reads the current link speed
14133 * - hfi1_firmware_init() - the needed firmware is ready to be
14134 * downloaded
14135 */
14136 ret = do_pcie_gen3_transition(dd);
14137 if (ret)
14138 goto bail_cleanup;
14139
14140 /* start setting dd values and adjusting CSRs */
14141 init_early_variables(dd);
14142
14143 parse_platform_config(dd);
14144
Dean Luick5d9157a2015-11-16 21:59:34 -050014145 ret = obtain_boardname(dd);
14146 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014147 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014148
14149 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014150 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014151 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014152 (u32)dd->majrev,
14153 (u32)dd->minrev,
14154 (dd->revision >> CCE_REVISION_SW_SHIFT)
14155 & CCE_REVISION_SW_MASK);
14156
14157 ret = set_up_context_variables(dd);
14158 if (ret)
14159 goto bail_cleanup;
14160
14161 /* set initial RXE CSRs */
14162 init_rxe(dd);
14163 /* set initial TXE CSRs */
14164 init_txe(dd);
14165 /* set initial non-RXE, non-TXE CSRs */
14166 init_other(dd);
14167 /* set up KDETH QP prefix in both RX and TX CSRs */
14168 init_kdeth_qp(dd);
14169
Mitko Haralanov957558c2016-02-03 14:33:40 -080014170 ret = hfi1_dev_affinity_init(dd);
14171 if (ret)
14172 goto bail_cleanup;
14173
Mike Marciniszyn77241052015-07-30 15:17:43 -040014174 /* send contexts must be set up before receive contexts */
14175 ret = init_send_contexts(dd);
14176 if (ret)
14177 goto bail_cleanup;
14178
14179 ret = hfi1_create_ctxts(dd);
14180 if (ret)
14181 goto bail_cleanup;
14182
14183 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14184 /*
14185 * rcd[0] is guaranteed to be valid by this point. Also, all
14186	 * contexts are using the same value, as per the module parameter.
14187 */
14188 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14189
14190 ret = init_pervl_scs(dd);
14191 if (ret)
14192 goto bail_cleanup;
14193
14194 /* sdma init */
14195 for (i = 0; i < dd->num_pports; ++i) {
14196 ret = sdma_init(dd, i);
14197 if (ret)
14198 goto bail_cleanup;
14199 }
14200
14201 /* use contexts created by hfi1_create_ctxts */
14202 ret = set_up_interrupts(dd);
14203 if (ret)
14204 goto bail_cleanup;
14205
14206 /* set up LCB access - must be after set_up_interrupts() */
14207 init_lcb_access(dd);
14208
14209 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14210 dd->base_guid & 0xFFFFFF);
14211
14212 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14213 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14214 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14215
14216 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14217 if (ret)
14218 goto bail_clear_intr;
14219 check_fabric_firmware_versions(dd);
14220
14221 thermal_init(dd);
14222
14223 ret = init_cntrs(dd);
14224 if (ret)
14225 goto bail_clear_intr;
14226
14227 ret = init_rcverr(dd);
14228 if (ret)
14229 goto bail_free_cntrs;
14230
14231 ret = eprom_init(dd);
14232 if (ret)
14233 goto bail_free_rcverr;
14234
14235 goto bail;
14236
14237bail_free_rcverr:
14238 free_rcverr(dd);
14239bail_free_cntrs:
14240 free_cntrs(dd);
14241bail_clear_intr:
14242 clean_up_interrupts(dd);
14243bail_cleanup:
14244 hfi1_pcie_ddcleanup(dd);
14245bail_free:
14246 hfi1_free_devdata(dd);
14247 dd = ERR_PTR(ret);
14248bail:
14249 return dd;
14250}
14251
14252static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14253 u32 dw_len)
14254{
14255 u32 delta_cycles;
14256 u32 current_egress_rate = ppd->current_egress_rate;
14257 /* rates here are in units of 10^6 bits/sec */
14258
14259 if (desired_egress_rate == -1)
14260 return 0; /* shouldn't happen */
14261
14262 if (desired_egress_rate >= current_egress_rate)
14263		return 0; /* we cannot go any faster, only slower */
14264
14265 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14266 egress_cycles(dw_len * 4, current_egress_rate);
14267
14268 return (u16)delta_cycles;
14269}
14270
Mike Marciniszyn77241052015-07-30 15:17:43 -040014271/**
14272 * create_pbc - build a pbc for transmission
14273 * @flags: special case flags or-ed in built pbc
14274 * @srate_mbs: static rate (Mbit/s)
14275 * @vl: vl
14276 * @dw_len: dword length (header words + data words + pbc words)
14277 *
14278 * Create a PBC with the given flags, rate, VL, and length.
14279 *
14280 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14281 * for verbs, which does not use this PSM feature. The lone other caller
14282 * is for the diagnostic interface which calls this if the user does not
14283 * supply their own PBC.
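 *
 * Illustrative use (arbitrary values): a 16-dword VL0 packet with no
 * special flags and no static rate pacing could be described with
 * create_pbc(ppd, 0, 0, 0, 16); a zero srate_mbs skips the
 * delay_cycles() calculation entirely.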
14284 */
14285u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14286 u32 dw_len)
14287{
14288 u64 pbc, delay = 0;
14289
14290 if (unlikely(srate_mbs))
14291 delay = delay_cycles(ppd, srate_mbs, dw_len);
14292
14293 pbc = flags
14294 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14295 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14296 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14297 | (dw_len & PBC_LENGTH_DWS_MASK)
14298 << PBC_LENGTH_DWS_SHIFT;
14299
14300 return pbc;
14301}
14302
14303#define SBUS_THERMAL 0x4f
14304#define SBUS_THERM_MONITOR_MODE 0x1
14305
14306#define THERM_FAILURE(dev, ret, reason) \
14307 dd_dev_err((dd), \
14308 "Thermal sensor initialization failed: %s (%d)\n", \
14309 (reason), (ret))
14310
14311/*
14312 * Initialize the Avago Thermal sensor.
14313 *
14314 * After initialization, enable polling of thermal sensor through
14315 * SBus interface. In order for this to work, the SBus Master
14316 * firmware has to be loaded because the HW polling
14317 * logic uses SBus interrupts, which are not supported with
14318 * default firmware. Otherwise, no data will be returned through
14319 * the ASIC_STS_THERM CSR.
14320 */
14321static int thermal_init(struct hfi1_devdata *dd)
14322{
14323 int ret = 0;
14324
14325 if (dd->icode != ICODE_RTL_SILICON ||
14326 !(dd->flags & HFI1_DO_INIT_ASIC))
14327 return ret;
14328
14329 acquire_hw_mutex(dd);
14330 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014331 /* Disable polling of thermal readings */
14332 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14333 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014334 /* Thermal Sensor Initialization */
14335 /* Step 1: Reset the Thermal SBus Receiver */
14336 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14337 RESET_SBUS_RECEIVER, 0);
14338 if (ret) {
14339 THERM_FAILURE(dd, ret, "Bus Reset");
14340 goto done;
14341 }
14342 /* Step 2: Set Reset bit in Thermal block */
14343 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14344 WRITE_SBUS_RECEIVER, 0x1);
14345 if (ret) {
14346 THERM_FAILURE(dd, ret, "Therm Block Reset");
14347 goto done;
14348 }
14349 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14350 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14351 WRITE_SBUS_RECEIVER, 0x32);
14352 if (ret) {
14353 THERM_FAILURE(dd, ret, "Write Clock Div");
14354 goto done;
14355 }
14356 /* Step 4: Select temperature mode */
14357 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14358 WRITE_SBUS_RECEIVER,
14359 SBUS_THERM_MONITOR_MODE);
14360 if (ret) {
14361 THERM_FAILURE(dd, ret, "Write Mode Sel");
14362 goto done;
14363 }
14364 /* Step 5: De-assert block reset and start conversion */
14365 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14366 WRITE_SBUS_RECEIVER, 0x2);
14367 if (ret) {
14368 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14369 goto done;
14370 }
14371 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14372 msleep(22);
14373
14374 /* Enable polling of thermal readings */
14375 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14376done:
14377 release_hw_mutex(dd);
14378 return ret;
14379}
14380
14381static void handle_temp_err(struct hfi1_devdata *dd)
14382{
14383 struct hfi1_pportdata *ppd = &dd->pport[0];
14384 /*
14385 * Thermal Critical Interrupt
14386 * Put the device into forced freeze mode, take link down to
14387 * offline, and put DC into reset.
14388 */
14389 dd_dev_emerg(dd,
14390 "Critical temperature reached! Forcing device into freeze mode!\n");
14391 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014392 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014393 /*
14394 * Shut DC down as much and as quickly as possible.
14395 *
14396 * Step 1: Take the link down to OFFLINE. This will cause the
14397 * 8051 to put the Serdes in reset. However, we don't want to
14398 * go through the entire link state machine since we want to
14399 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14400 * but rather an attempt to save the chip.
14401 * Code below is almost the same as quiet_serdes() but avoids
14402 * all the extra work and the sleeps.
14403 */
14404 ppd->driver_link_ready = 0;
14405 ppd->link_enabled = 0;
14406 set_physical_link_state(dd, PLS_OFFLINE |
14407 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14408 /*
14409 * Step 2: Shutdown LCB and 8051
14410 * After shutdown, do not restore DC_CFG_RESET value.
14411 */
14412 dc_shutdown(dd);
14413}