1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
66#include "efivar.h"
67
68#define NUM_IB_PORTS 1
69
70uint kdeth_qp;
71module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75module_param(num_vls, uint, S_IRUGO);
76MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78/*
79 * Default time to aggregate two 10K packets from the idle state
80 * (timer not running). The timer starts at the end of the first packet,
81 * so only the time for one 10K packet and header plus a bit extra is needed.
 82 * 10 * 1024 + 64 header bytes = 10304 bytes
 83 * 10304 bytes / 12.5 GB/s = 824.32 ns
84 */
85uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86module_param(rcv_intr_timeout, uint, S_IRUGO);
87MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89uint rcv_intr_count = 16; /* same as qib */
90module_param(rcv_intr_count, uint, S_IRUGO);
91MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93ushort link_crc_mask = SUPPORTED_CRCS;
94module_param(link_crc_mask, ushort, S_IRUGO);
95MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97uint loopback;
98module_param_named(loopback, loopback, uint, S_IRUGO);
 99MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
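/*
 * The module parameters above are read-only at runtime (S_IRUGO) and are
 * given at load time, e.g. (assuming the module is named hfi1):
 *   modprobe hfi1 num_vls=4 rcv_intr_count=32 loopback=1
 */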
100
101/* Other driver tunables */
102uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
103static ushort crc_14b_sideband = 1;
104static uint use_flr = 1;
105uint quick_linkup; /* skip LNI */
106
107struct flag_table {
108 u64 flag; /* the flag */
109 char *str; /* description string */
110 u16 extra; /* extra information */
111 u16 unused0;
112 u32 unused1;
113};
114
115/* str must be a string constant */
116#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117#define FLAG_ENTRY0(str, flag) {flag, str, 0}
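/*
 * For illustration: FLAG_ENTRY0("CceCsrParityErr", mask) expands to the
 * struct flag_table initializer { mask, "CceCsrParityErr", 0 }, while
 * FLAG_ENTRY() also fills the extra field (used below to carry the SEC_*
 * consequence bits).
 */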
118
119/* Send Error Consequences */
120#define SEC_WRITE_DROPPED 0x1
121#define SEC_PACKET_DROPPED 0x2
122#define SEC_SC_HALTED 0x4 /* per-context only */
123#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
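/*
 * The SEC_* consequence bits are carried in the "extra" field of the
 * flag_table entries below (see pio_err_status_flags and
 * sc_err_status_flags).
 */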
124
125#define MIN_KERNEL_KCTXTS 2
126#define FIRST_KERNEL_KCTXT 1
127#define NUM_MAP_REGS 32
128
129/* Bit offset into the GUID which carries HFI id information */
130#define GUID_HFI_INDEX_SHIFT 39
131
132/* extract the emulation revision */
133#define emulator_rev(dd) ((dd)->irev >> 8)
134/* parallel and serial emulation versions are 3 and 4 respectively */
135#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
136#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
137
138/* RSM fields */
139
140/* packet type */
141#define IB_PACKET_TYPE 2ull
142#define QW_SHIFT 6ull
143/* QPN[7..1] */
144#define QPN_WIDTH 7ull
145
146/* LRH.BTH: QW 0, OFFSET 48 - for match */
147#define LRH_BTH_QW 0ull
148#define LRH_BTH_BIT_OFFSET 48ull
149#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
150#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
151#define LRH_BTH_SELECT
152#define LRH_BTH_MASK 3ull
153#define LRH_BTH_VALUE 2ull
154
155/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
156#define LRH_SC_QW 0ull
157#define LRH_SC_BIT_OFFSET 56ull
158#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
159#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
160#define LRH_SC_MASK 128ull
161#define LRH_SC_VALUE 0ull
162
163/* SC[n..0] QW 0, OFFSET 60 - for select */
164#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
165
166/* QPN[m+n:1] QW 1, OFFSET 1 */
167#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
168
169/* defines to build power on SC2VL table */
170#define SC2VL_VAL( \
171 num, \
172 sc0, sc0val, \
173 sc1, sc1val, \
174 sc2, sc2val, \
175 sc3, sc3val, \
176 sc4, sc4val, \
177 sc5, sc5val, \
178 sc6, sc6val, \
179 sc7, sc7val) \
180( \
181 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
182 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
183 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
184 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
185 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
186 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
187 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
188 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
189)
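/*
 * Illustrative example: SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5,
 * 6, 6, 7, 7) builds a SendSC2VLT0 value that maps SC0..SC7 one-to-one
 * onto VL0..VL7; each (scN, scNval) argument pair places scNval at the
 * SEND_SC2VLT<num>_SC<scN> shift position.
 */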
190
191#define DC_SC_VL_VAL( \
192 range, \
193 e0, e0val, \
194 e1, e1val, \
195 e2, e2val, \
196 e3, e3val, \
197 e4, e4val, \
198 e5, e5val, \
199 e6, e6val, \
200 e7, e7val, \
201 e8, e8val, \
202 e9, e9val, \
203 e10, e10val, \
204 e11, e11val, \
205 e12, e12val, \
206 e13, e13val, \
207 e14, e14val, \
208 e15, e15val) \
209( \
210 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
211 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
212 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
213 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
214 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
215 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
216 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
217 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
218 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
219 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
220 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
221 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
222 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
223 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
224 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
225 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
226)
227
228/* all CceStatus sub-block freeze bits */
229#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
230 | CCE_STATUS_RXE_FROZE_SMASK \
231 | CCE_STATUS_TXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
233/* all CceStatus sub-block TXE pause bits */
234#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
235 | CCE_STATUS_TXE_PAUSED_SMASK \
236 | CCE_STATUS_SDMA_PAUSED_SMASK)
237/* all CceStatus sub-block RXE pause bits */
238#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
239
240/*
241 * CCE Error flags.
242 */
243static struct flag_table cce_err_status_flags[] = {
244/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
245 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
246/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
247 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
248/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
250/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
251 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
252/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
253 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
254/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
255 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
256/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
257 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
258/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
259 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
260/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
261 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
262/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
263 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
 264/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
266/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
268/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
270/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
271 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
 272/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
274/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
275 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
 276/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
 278/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
279 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
280/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
282/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
283 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
284/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
286/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
287 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
288/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
290/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
291 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
292/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
294/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
295 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
296/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
298/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
299 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
300/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
302/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
303 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
304/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
305 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
306/*31*/ FLAG_ENTRY0("LATriggered",
307 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
308/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
309 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
310/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
312/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
313 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
314/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
315 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
316/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
317 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
318/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
320/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
321 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
322/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
324/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
325 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
326/*41-63 reserved*/
327};
328
329/*
330 * Misc Error flags
331 */
332#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
333static struct flag_table misc_err_status_flags[] = {
334/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
335/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
336/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
337/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
338/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
339/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
340/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
341/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
342/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
343/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
344/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
345/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
346/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
347};
348
349/*
350 * TXE PIO Error flags and consequences
351 */
352static struct flag_table pio_err_status_flags[] = {
353/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
354 SEC_WRITE_DROPPED,
355 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
356/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
357 SEC_SPC_FREEZE,
358 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
359/* 2*/ FLAG_ENTRY("PioCsrParity",
360 SEC_SPC_FREEZE,
361 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
362/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
363 SEC_SPC_FREEZE,
364 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
365/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
366 SEC_SPC_FREEZE,
367 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
368/* 5*/ FLAG_ENTRY("PioPccFifoParity",
369 SEC_SPC_FREEZE,
370 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
371/* 6*/ FLAG_ENTRY("PioPecFifoParity",
372 SEC_SPC_FREEZE,
373 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
374/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
375 SEC_SPC_FREEZE,
376 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
377/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
378 SEC_SPC_FREEZE,
379 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
380/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
381 SEC_SPC_FREEZE,
382 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
383/*10*/ FLAG_ENTRY("PioSmPktResetParity",
384 SEC_SPC_FREEZE,
385 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
386/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
387 SEC_SPC_FREEZE,
388 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
389/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
390 SEC_SPC_FREEZE,
391 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
392/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
393 0,
394 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
395/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
396 0,
397 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
398/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
399 SEC_SPC_FREEZE,
400 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
401/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
402 SEC_SPC_FREEZE,
403 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
404/*17*/ FLAG_ENTRY("PioInitSmIn",
405 0,
406 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
407/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
408 SEC_SPC_FREEZE,
409 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
410/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
411 SEC_SPC_FREEZE,
412 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
413/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
414 0,
415 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
416/*21*/ FLAG_ENTRY("PioWriteDataParity",
417 SEC_SPC_FREEZE,
418 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
419/*22*/ FLAG_ENTRY("PioStateMachine",
420 SEC_SPC_FREEZE,
421 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
422/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
423 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
424 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
425/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
426 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
427 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
428/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
429 SEC_SPC_FREEZE,
430 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
431/*26*/ FLAG_ENTRY("PioVlfSopParity",
432 SEC_SPC_FREEZE,
433 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
434/*27*/ FLAG_ENTRY("PioVlFifoParity",
435 SEC_SPC_FREEZE,
436 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
437/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
438 SEC_SPC_FREEZE,
439 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
440/*29*/ FLAG_ENTRY("PioPpmcSopLen",
441 SEC_SPC_FREEZE,
442 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
443/*30-31 reserved*/
444/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
445 SEC_SPC_FREEZE,
446 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
447/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
448 SEC_SPC_FREEZE,
449 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
450/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
451 SEC_SPC_FREEZE,
452 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
453/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
454 SEC_SPC_FREEZE,
455 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
456/*36-63 reserved*/
457};
458
459/* TXE PIO errors that cause an SPC freeze */
460#define ALL_PIO_FREEZE_ERR \
461 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
462 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
490
491/*
492 * TXE SDMA Error flags
493 */
494static struct flag_table sdma_err_status_flags[] = {
495/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
496 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
497/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
498 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
499/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
500 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
501/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
503/*04-63 reserved*/
504};
505
506/* TXE SDMA errors that cause an SPC freeze */
507#define ALL_SDMA_FREEZE_ERR \
508 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
509 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
511
512/*
513 * TXE Egress Error flags
514 */
515#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
516static struct flag_table egress_err_status_flags[] = {
517/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
518/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
519/* 2 reserved */
520/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
521 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
522/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
523/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
524/* 6 reserved */
525/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
526 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
527/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
528 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
529/* 9-10 reserved */
530/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
531 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
532/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
533/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
534/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
535/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
536/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
537 SEES(TX_SDMA0_DISALLOWED_PACKET)),
538/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
539 SEES(TX_SDMA1_DISALLOWED_PACKET)),
540/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
541 SEES(TX_SDMA2_DISALLOWED_PACKET)),
542/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
543 SEES(TX_SDMA3_DISALLOWED_PACKET)),
544/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
545 SEES(TX_SDMA4_DISALLOWED_PACKET)),
546/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
547 SEES(TX_SDMA5_DISALLOWED_PACKET)),
548/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
549 SEES(TX_SDMA6_DISALLOWED_PACKET)),
550/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
551 SEES(TX_SDMA7_DISALLOWED_PACKET)),
552/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
553 SEES(TX_SDMA8_DISALLOWED_PACKET)),
554/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
555 SEES(TX_SDMA9_DISALLOWED_PACKET)),
556/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
557 SEES(TX_SDMA10_DISALLOWED_PACKET)),
558/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
559 SEES(TX_SDMA11_DISALLOWED_PACKET)),
560/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
561 SEES(TX_SDMA12_DISALLOWED_PACKET)),
562/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
563 SEES(TX_SDMA13_DISALLOWED_PACKET)),
564/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
565 SEES(TX_SDMA14_DISALLOWED_PACKET)),
566/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
567 SEES(TX_SDMA15_DISALLOWED_PACKET)),
568/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
569 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
570/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
571 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
572/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
573 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
574/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
575 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
576/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
578/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
580/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
582/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
584/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
586/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
587/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
588/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
589/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
590/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
591/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
592/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
593/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
594/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
595/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
596/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
597/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
598/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
599/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
600/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
601/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
602/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
603/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
604/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
605/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
606/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
607/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
608 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
609/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
610 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
611};
612
613/*
614 * TXE Egress Error Info flags
615 */
616#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
617static struct flag_table egress_err_info_flags[] = {
618/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
619/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
620/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
621/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
622/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
623/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
624/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
625/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
626/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
627/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
628/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
629/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
630/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
631/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
632/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
633/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
634/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
635/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
636/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
637/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
638/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
639/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
640};
641
642/* TXE Egress errors that cause an SPC freeze */
643#define ALL_TXE_EGRESS_FREEZE_ERR \
644 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
645 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
646 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
647 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
648 | SEES(TX_LAUNCH_CSR_PARITY) \
649 | SEES(TX_SBRD_CTL_CSR_PARITY) \
650 | SEES(TX_CONFIG_PARITY) \
651 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
652 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
653 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
654 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
655 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
656 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
657 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
658 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
660 | SEES(TX_CREDIT_RETURN_PARITY))
661
662/*
663 * TXE Send error flags
664 */
665#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
666static struct flag_table send_err_status_flags[] = {
667/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
668/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
669/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
670};
671
672/*
673 * TXE Send Context Error flags and consequences
674 */
675static struct flag_table sc_err_status_flags[] = {
676/* 0*/ FLAG_ENTRY("InconsistentSop",
677 SEC_PACKET_DROPPED | SEC_SC_HALTED,
678 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
679/* 1*/ FLAG_ENTRY("DisallowedPacket",
680 SEC_PACKET_DROPPED | SEC_SC_HALTED,
681 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
682/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
683 SEC_WRITE_DROPPED | SEC_SC_HALTED,
684 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
685/* 3*/ FLAG_ENTRY("WriteOverflow",
686 SEC_WRITE_DROPPED | SEC_SC_HALTED,
687 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
688/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
689 SEC_WRITE_DROPPED | SEC_SC_HALTED,
690 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
691/* 5-63 reserved*/
692};
693
694/*
695 * RXE Receive Error flags
696 */
697#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
698static struct flag_table rxe_err_status_flags[] = {
699/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
700/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
701/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
702/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
703/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
704/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
705/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
706/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
707/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
708/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
709/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
710/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
711/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
712/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
713/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
714/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
715/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
716 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
717/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
718/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
719/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
720 RXES(RBUF_BLOCK_LIST_READ_UNC)),
721/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
722 RXES(RBUF_BLOCK_LIST_READ_COR)),
723/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
724 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
725/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
726 RXES(RBUF_CSR_QENT_CNT_PARITY)),
727/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
728 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
729/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
730 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
731/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
732/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
733/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
734 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
735/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
736/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
737/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
738/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
739/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
740/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
741/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
742/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
743 RXES(RBUF_FL_INITDONE_PARITY)),
744/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
745 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
746/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
747/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
748/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
749/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
750 RXES(LOOKUP_DES_PART1_UNC_COR)),
751/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
752 RXES(LOOKUP_DES_PART2_PARITY)),
753/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
754/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
755/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
756/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
757/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
758/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
759/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
760/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
761/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
762/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
763/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
764/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
765/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
766/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
767/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
768/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
769/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
770/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
771/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
772/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
773/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
774/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
775};
776
777/* RXE errors that will trigger an SPC freeze */
778#define ALL_RXE_FREEZE_ERR \
779 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
780 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
781 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
782 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
783 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
784 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
785 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
786 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
823
824#define RXE_FREEZE_ABORT_MASK \
825 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
826 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
827 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
828
829/*
830 * DCC Error Flags
831 */
832#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
833static struct flag_table dcc_err_flags[] = {
834 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
835 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
836 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
837 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
838 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
839 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
840 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
841 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
842 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
843 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
844 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
845 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
846 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
847 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
848 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
849 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
850 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
851 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
852 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
853 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
854 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
855 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
856 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
857 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
858 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
859 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
860 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
861 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
862 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
863 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
864 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
865 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
866 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
867 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
868 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
869 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
870 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
871 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
872 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
873 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
874 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
875 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
876 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
877 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
878 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
879 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
880};
881
882/*
883 * LCB error flags
884 */
885#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
886static struct flag_table lcb_err_flags[] = {
887/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
888/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
889/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
890/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
891 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
892/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
893/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
894/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
895/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
896/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
897/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
898/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
899/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
900/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
901/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
902 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
903/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
904/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
905/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
906/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
907/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
908/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
909 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
910/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
911/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
912/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
913/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
914/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
915/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
916/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
917 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
918/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
919/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
920 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
921/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
922 LCBE(REDUNDANT_FLIT_PARITY_ERR))
923};
924
925/*
926 * DC8051 Error Flags
927 */
928#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
929static struct flag_table dc8051_err_flags[] = {
930 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
931 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
932 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
933 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
934 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
935 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
936 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
937 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
938 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
939 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
940 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
941};
942
943/*
944 * DC8051 Information Error flags
945 *
946 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
947 */
948static struct flag_table dc8051_info_err_flags[] = {
949 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
950 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
951 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
952 FLAG_ENTRY0("Serdes internal loopback failure",
953 FAILED_SERDES_INTERNAL_LOOPBACK),
954 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
955 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
956 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
957 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
958 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
959 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
960 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
961 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
962};
963
964/*
965 * DC8051 Information Host Information flags
966 *
967 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
968 */
969static struct flag_table dc8051_info_host_msg_flags[] = {
970 FLAG_ENTRY0("Host request done", 0x0001),
971 FLAG_ENTRY0("BC SMA message", 0x0002),
972 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
973 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
974 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
975 FLAG_ENTRY0("External device config request", 0x0020),
976 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
977 FLAG_ENTRY0("LinkUp achieved", 0x0080),
978 FLAG_ENTRY0("Link going down", 0x0100),
979};
980
981
982static u32 encoded_size(u32 size);
983static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
984static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
985static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
986 u8 *continuous);
987static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
988 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
989static void read_vc_remote_link_width(struct hfi1_devdata *dd,
990 u8 *remote_tx_rate, u16 *link_widths);
991static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
992 u8 *flag_bits, u16 *link_widths);
993static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
994 u8 *device_rev);
995static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
996static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
997static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
998 u8 *tx_polarity_inversion,
999 u8 *rx_polarity_inversion, u8 *max_rate);
1000static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1001 unsigned int context, u64 err_status);
1002static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1003static void handle_dcc_err(struct hfi1_devdata *dd,
1004 unsigned int context, u64 err_status);
1005static void handle_lcb_err(struct hfi1_devdata *dd,
1006 unsigned int context, u64 err_status);
1007static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1008static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1009static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1010static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1011static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1012static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void set_partition_keys(struct hfi1_pportdata *);
1016static const char *link_state_name(u32 state);
1017static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1018 u32 state);
1019static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1020 u64 *out_data);
1021static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1022static int thermal_init(struct hfi1_devdata *dd);
1023
1024static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1025 int msecs);
1026static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1027static void handle_temp_err(struct hfi1_devdata *);
1028static void dc_shutdown(struct hfi1_devdata *);
1029static void dc_start(struct hfi1_devdata *);
1030
1031/*
1032 * Error interrupt table entry. This is used as input to the interrupt
 1033 * "clear down" routine used for all second tier error interrupt registers.
1034 * Second tier interrupt registers have a single bit representing them
1035 * in the top-level CceIntStatus.
1036 */
1037struct err_reg_info {
1038 u32 status; /* status CSR offset */
1039 u32 clear; /* clear CSR offset */
1040 u32 mask; /* mask CSR offset */
1041 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1042 const char *desc;
1043};
1044
1045#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1046#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1047#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1048
1049/*
1050 * Helpers for building HFI and DC error interrupt table entries. Different
1051 * helpers are needed because of inconsistent register names.
1052 */
1053#define EE(reg, handler, desc) \
1054 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1055 handler, desc }
1056#define DC_EE1(reg, handler, desc) \
1057 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1058#define DC_EE2(reg, handler, desc) \
1059 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
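/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" };
 * the DC_EE1/DC_EE2 variants paste the _FLG/_CLR/_EN style CSR names used
 * by the DC blocks instead.
 */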
1060
1061/*
1062 * Table of the "misc" grouping of error interrupts. Each entry refers to
1063 * another register containing more information.
1064 */
1065static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1066/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1067/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1068/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1069/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1070/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1071/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1072/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1073/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1074 /* the rest are reserved */
1075};
1076
1077/*
1078 * Index into the Various section of the interrupt sources
1079 * corresponding to the Critical Temperature interrupt.
1080 */
1081#define TCRIT_INT_SOURCE 4
1082
1083/*
1084 * SDMA error interrupt entry - refers to another register containing more
1085 * information.
1086 */
1087static const struct err_reg_info sdma_eng_err =
1088 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1089
1090static const struct err_reg_info various_err[NUM_VARIOUS] = {
1091/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1092/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1093/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1094/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1095/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1096 /* rest are reserved */
1097};
1098
1099/*
1100 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1101 * register can not be derived from the MTU value because 10K is not
1102 * a power of 2. Therefore, we need a constant. Everything else can
1103 * be calculated.
1104 */
1105#define DCC_CFG_PORT_MTU_CAP_10240 7
1106
1107/*
1108 * Table of the DC grouping of error interrupts. Each entry refers to
1109 * another register containing more information.
1110 */
1111static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1112/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1113/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1114/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1115/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1116 /* the rest are reserved */
1117};
1118
1119struct cntr_entry {
1120 /*
1121 * counter name
1122 */
1123 char *name;
1124
1125 /*
1126 * csr to read for name (if applicable)
1127 */
1128 u64 csr;
1129
1130 /*
1131 * offset into dd or ppd to store the counter's value
1132 */
1133 int offset;
1134
1135 /*
1136 * flags
1137 */
1138 u8 flags;
1139
1140 /*
1141 * accessor for stat element, context either dd or ppd
1142 */
1143 u64 (*rw_cntr)(const struct cntr_entry *,
1144 void *context,
1145 int vl,
1146 int mode,
1147 u64 data);
1148};
1149
1150#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1151#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1152
1153#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1154{ \
1155 name, \
1156 csr, \
1157 offset, \
1158 flags, \
1159 accessor \
1160}
1161
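/*
 * In the counter helper macros below, "counter * 8" converts a counter
 * index into a byte offset: the per-counter CSRs are laid out 8 bytes
 * apart, so counter N lives at the array base + N * 8. The CNTR_32BIT
 * flag marks arrays whose counters only use the low 32 bits.
 */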
1162/* 32bit RXE */
1163#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1164CNTR_ELEM(#name, \
1165 (counter * 8 + RCV_COUNTER_ARRAY32), \
1166 0, flags | CNTR_32BIT, \
1167 port_access_u32_csr)
1168
1169#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1170CNTR_ELEM(#name, \
1171 (counter * 8 + RCV_COUNTER_ARRAY32), \
1172 0, flags | CNTR_32BIT, \
1173 dev_access_u32_csr)
1174
1175/* 64bit RXE */
1176#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY64), \
1179 0, flags, \
1180 port_access_u64_csr)
1181
1182#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1183CNTR_ELEM(#name, \
1184 (counter * 8 + RCV_COUNTER_ARRAY64), \
1185 0, flags, \
1186 dev_access_u64_csr)
1187
1188#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1189#define OVR_ELM(ctx) \
1190CNTR_ELEM("RcvHdrOvr" #ctx, \
1191 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1192 0, CNTR_NORMAL, port_access_u64_csr)
1193
1194/* 32bit TXE */
1195#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1196CNTR_ELEM(#name, \
1197 (counter * 8 + SEND_COUNTER_ARRAY32), \
1198 0, flags | CNTR_32BIT, \
1199 port_access_u32_csr)
1200
1201/* 64bit TXE */
1202#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY64), \
1205 0, flags, \
1206 port_access_u64_csr)
1207
1208#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1209CNTR_ELEM(#name,\
1210 counter * 8 + SEND_COUNTER_ARRAY64, \
1211 0, \
1212 flags, \
1213 dev_access_u64_csr)
1214
1215/* CCE */
1216#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name, \
1218 (counter * 8 + CCE_COUNTER_ARRAY32), \
1219 0, flags | CNTR_32BIT, \
1220 dev_access_u32_csr)
1221
1222#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1223CNTR_ELEM(#name, \
1224 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1225 0, flags | CNTR_32BIT, \
1226 dev_access_u32_csr)
1227
1228/* DC */
1229#define DC_PERF_CNTR(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 counter, \
1232 0, \
1233 flags, \
1234 dev_access_u64_csr)
1235
1236#define DC_PERF_CNTR_LCB(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dc_access_lcb_cntr)
1242
1243/* ibp counters */
1244#define SW_IBP_CNTR(name, cntr) \
1245CNTR_ELEM(#name, \
1246 0, \
1247 0, \
1248 CNTR_SYNTH, \
1249 access_ibp_##cntr)
1250
1251u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1252{
1253 u64 val;
1254
1255 if (dd->flags & HFI1_PRESENT) {
1256 val = readq((void __iomem *)dd->kregbase + offset);
1257 return val;
1258 }
1259 return -1;
1260}
1261
1262void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1263{
1264 if (dd->flags & HFI1_PRESENT)
1265 writeq(value, (void __iomem *)dd->kregbase + offset);
1266}
1267
1268void __iomem *get_csr_addr(
1269 struct hfi1_devdata *dd,
1270 u32 offset)
1271{
1272 return (void __iomem *)dd->kregbase + offset;
1273}
1274
1275static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1276 int mode, u64 value)
1277{
1278 u64 ret;
1279
1280
1281 if (mode == CNTR_MODE_R) {
1282 ret = read_csr(dd, csr);
1283 } else if (mode == CNTR_MODE_W) {
1284 write_csr(dd, csr, value);
1285 ret = value;
1286 } else {
1287 dd_dev_err(dd, "Invalid cntr register access mode");
1288 return 0;
1289 }
1290
1291 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1292 return ret;
1293}
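/*
 * The counter accessors below all funnel through read_write_csr() (or
 * read_write_sw() for software counters): CNTR_MODE_R returns the current
 * value, CNTR_MODE_W writes the supplied value back.
 */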
1294
1295/* Dev Access */
1296static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1297 void *context, int vl, int mode, u64 data)
1298{
1299	struct hfi1_devdata *dd = context;
1300
1301 if (vl != CNTR_INVALID_VL)
1302 return 0;
1303 return read_write_csr(dd, entry->csr, mode, data);
1304}
1305
1306static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1307 int vl, int mode, u64 data)
1308{
1309	struct hfi1_devdata *dd = context;
1310
1311 u64 val = 0;
1312 u64 csr = entry->csr;
1313
1314 if (entry->flags & CNTR_VL) {
1315 if (vl == CNTR_INVALID_VL)
1316 return 0;
1317 csr += 8 * vl;
1318 } else {
1319 if (vl != CNTR_INVALID_VL)
1320 return 0;
1321 }
1322
1323 val = read_write_csr(dd, csr, mode, data);
1324 return val;
1325}
1326
1327static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1328 int vl, int mode, u64 data)
1329{
1330	struct hfi1_devdata *dd = context;
1331	u32 csr = entry->csr;
1332 int ret = 0;
1333
1334 if (vl != CNTR_INVALID_VL)
1335 return 0;
1336 if (mode == CNTR_MODE_R)
1337 ret = read_lcb_csr(dd, csr, &data);
1338 else if (mode == CNTR_MODE_W)
1339 ret = write_lcb_csr(dd, csr, data);
1340
1341 if (ret) {
1342 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1343 return 0;
1344 }
1345
1346 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1347 return data;
1348}
1349
1350/* Port Access */
1351static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1352 int vl, int mode, u64 data)
1353{
1354	struct hfi1_pportdata *ppd = context;
1355
1356 if (vl != CNTR_INVALID_VL)
1357 return 0;
1358 return read_write_csr(ppd->dd, entry->csr, mode, data);
1359}
1360
1361static u64 port_access_u64_csr(const struct cntr_entry *entry,
1362 void *context, int vl, int mode, u64 data)
1363{
1364	struct hfi1_pportdata *ppd = context;
1365	u64 val;
1366 u64 csr = entry->csr;
1367
1368 if (entry->flags & CNTR_VL) {
1369 if (vl == CNTR_INVALID_VL)
1370 return 0;
1371 csr += 8 * vl;
1372 } else {
1373 if (vl != CNTR_INVALID_VL)
1374 return 0;
1375 }
1376 val = read_write_csr(ppd->dd, csr, mode, data);
1377 return val;
1378}
1379
1380/* Software defined */
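/*
 * "Software defined" counters live in driver memory rather than in chip CSRs;
 * read_write_sw() mirrors the CSR dispatcher semantics for a plain u64 field
 * (reads return it, writes overwrite it).
 */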
1381static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1382 u64 data)
1383{
1384 u64 ret;
1385
1386 if (mode == CNTR_MODE_R) {
1387 ret = *cntr;
1388 } else if (mode == CNTR_MODE_W) {
1389 *cntr = data;
1390 ret = data;
1391 } else {
1392 dd_dev_err(dd, "Invalid cntr sw access mode");
1393 return 0;
1394 }
1395
1396 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1397
1398 return ret;
1399}
1400
1401static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1402 int vl, int mode, u64 data)
1403{
1404	struct hfi1_pportdata *ppd = context;
1405
1406 if (vl != CNTR_INVALID_VL)
1407 return 0;
1408 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1409}
1410
1411static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1412 int vl, int mode, u64 data)
1413{
1414	struct hfi1_pportdata *ppd = context;
1415
1416 if (vl != CNTR_INVALID_VL)
1417 return 0;
1418 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1419}
1420
1421static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1422 void *context, int vl, int mode, u64 data)
1423{
1424	struct hfi1_pportdata *ppd = context;
1425
1426 if (vl != CNTR_INVALID_VL)
1427 return 0;
1428
1429 return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1430}
1431
1432static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1433 void *context, int vl, int mode, u64 data)
1434{
1435	struct hfi1_pportdata *ppd = context;
1436
1437 if (vl != CNTR_INVALID_VL)
1438 return 0;
1439
1440 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1441 mode, data);
1442}
1443
1444static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1445 void *context, int vl, int mode, u64 data)
1446{
1447	struct hfi1_pportdata *ppd = context;
1448
1449 if (vl != CNTR_INVALID_VL)
1450 return 0;
1451
1452 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1453 mode, data);
1454}
1455
1456u64 get_all_cpu_total(u64 __percpu *cntr)
1457{
1458 int cpu;
1459 u64 counter = 0;
1460
1461 for_each_possible_cpu(cpu)
1462 counter += *per_cpu_ptr(cntr, cpu);
1463 return counter;
1464}
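/*
 * Per-CPU counter plumbing: a read reports the sum over all possible CPUs
 * minus the saved zero-baseline *z_val, and the only supported write is a
 * zeroing write, implemented by snapshotting the current total into *z_val.
 * See access_sw_cpu_intr() below for a typical user.
 */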
1465
1466static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1467 u64 __percpu *cntr,
1468 int vl, int mode, u64 data)
1469{
1470
1471 u64 ret = 0;
1472
1473 if (vl != CNTR_INVALID_VL)
1474 return 0;
1475
1476 if (mode == CNTR_MODE_R) {
1477 ret = get_all_cpu_total(cntr) - *z_val;
1478 } else if (mode == CNTR_MODE_W) {
1479 /* A write can only zero the counter */
1480 if (data == 0)
1481 *z_val = get_all_cpu_total(cntr);
1482 else
1483 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1484 } else {
1485 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1486 return 0;
1487 }
1488
1489 return ret;
1490}
1491
1492static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1493 void *context, int vl, int mode, u64 data)
1494{
1495	struct hfi1_devdata *dd = context;
1496
1497 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1498 mode, data);
1499}
1500
1501static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1502 void *context, int vl, int mode, u64 data)
1503{
1504	struct hfi1_devdata *dd = context;
1505
1506 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1507 mode, data);
1508}
1509
1510static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1511 void *context, int vl, int mode, u64 data)
1512{
1513	struct hfi1_devdata *dd = context;
1514
1515 return dd->verbs_dev.n_piowait;
1516}
1517
1518static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1519 void *context, int vl, int mode, u64 data)
1520{
1521	struct hfi1_devdata *dd = context;
1522
1523 return dd->verbs_dev.n_txwait;
1524}
1525
1526static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1527 void *context, int vl, int mode, u64 data)
1528{
1529	struct hfi1_devdata *dd = context;
1530
1531 return dd->verbs_dev.n_kmem_wait;
1532}
1533
1534static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1535 void *context, int vl, int mode, u64 data)
1536{
1537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1538
1539 return dd->verbs_dev.n_send_schedule;
1540}
1541
1542/* Software counters for the error status bits within MISC_ERR_STATUS */
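/*
 * Each accessor below reports the saved count for one MISC_ERR_STATUS bit;
 * the array index is the bit position, walked here from bit 12 down to 0.
 * (The counts themselves are presumably accumulated by the error-interrupt
 * handling elsewhere in this file.)
 */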
1543static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1544 void *context, int vl, int mode,
1545 u64 data)
1546{
1547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1548
1549 return dd->misc_err_status_cnt[12];
1550}
1551
1552static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1553 void *context, int vl, int mode,
1554 u64 data)
1555{
1556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1557
1558 return dd->misc_err_status_cnt[11];
1559}
1560
1561static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1562 void *context, int vl, int mode,
1563 u64 data)
1564{
1565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1566
1567 return dd->misc_err_status_cnt[10];
1568}
1569
1570static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1571 void *context, int vl,
1572 int mode, u64 data)
1573{
1574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1575
1576 return dd->misc_err_status_cnt[9];
1577}
1578
1579static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1580 void *context, int vl, int mode,
1581 u64 data)
1582{
1583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1584
1585 return dd->misc_err_status_cnt[8];
1586}
1587
1588static u64 access_misc_efuse_read_bad_addr_err_cnt(
1589 const struct cntr_entry *entry,
1590 void *context, int vl, int mode, u64 data)
1591{
1592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1593
1594 return dd->misc_err_status_cnt[7];
1595}
1596
1597static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1598 void *context, int vl,
1599 int mode, u64 data)
1600{
1601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1602
1603 return dd->misc_err_status_cnt[6];
1604}
1605
1606static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1607 void *context, int vl, int mode,
1608 u64 data)
1609{
1610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1611
1612 return dd->misc_err_status_cnt[5];
1613}
1614
1615static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1616 void *context, int vl, int mode,
1617 u64 data)
1618{
1619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1620
1621 return dd->misc_err_status_cnt[4];
1622}
1623
1624static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1625 void *context, int vl,
1626 int mode, u64 data)
1627{
1628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1629
1630 return dd->misc_err_status_cnt[3];
1631}
1632
1633static u64 access_misc_csr_write_bad_addr_err_cnt(
1634 const struct cntr_entry *entry,
1635 void *context, int vl, int mode, u64 data)
1636{
1637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1638
1639 return dd->misc_err_status_cnt[2];
1640}
1641
1642static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1643 void *context, int vl,
1644 int mode, u64 data)
1645{
1646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1647
1648 return dd->misc_err_status_cnt[1];
1649}
1650
1651static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1652 void *context, int vl, int mode,
1653 u64 data)
1654{
1655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1656
1657 return dd->misc_err_status_cnt[0];
1658}
1659
1660/*
1661 * Software counter for the aggregate of
1662 * individual CceErrStatus counters
1663 */
1664static u64 access_sw_cce_err_status_aggregated_cnt(
1665 const struct cntr_entry *entry,
1666 void *context, int vl, int mode, u64 data)
1667{
1668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1669
1670 return dd->sw_cce_err_status_aggregate;
1671}
1672
1673/*
1674 * Software counters corresponding to each of the
1675 * error status bits within CceErrStatus
1676 */
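/* As above: cce_err_status_cnt[] is indexed by bit position, 40 down to 0. */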
1677static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1678 void *context, int vl, int mode,
1679 u64 data)
1680{
1681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1682
1683 return dd->cce_err_status_cnt[40];
1684}
1685
1686static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1687 void *context, int vl, int mode,
1688 u64 data)
1689{
1690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1691
1692 return dd->cce_err_status_cnt[39];
1693}
1694
1695static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1696 void *context, int vl, int mode,
1697 u64 data)
1698{
1699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1700
1701 return dd->cce_err_status_cnt[38];
1702}
1703
1704static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1705 void *context, int vl, int mode,
1706 u64 data)
1707{
1708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1709
1710 return dd->cce_err_status_cnt[37];
1711}
1712
1713static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1714 void *context, int vl, int mode,
1715 u64 data)
1716{
1717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1718
1719 return dd->cce_err_status_cnt[36];
1720}
1721
1722static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1723 const struct cntr_entry *entry,
1724 void *context, int vl, int mode, u64 data)
1725{
1726 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1727
1728 return dd->cce_err_status_cnt[35];
1729}
1730
1731static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1732 const struct cntr_entry *entry,
1733 void *context, int vl, int mode, u64 data)
1734{
1735 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1736
1737 return dd->cce_err_status_cnt[34];
1738}
1739
1740static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1741 void *context, int vl,
1742 int mode, u64 data)
1743{
1744 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1745
1746 return dd->cce_err_status_cnt[33];
1747}
1748
1749static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1750 void *context, int vl, int mode,
1751 u64 data)
1752{
1753 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755 return dd->cce_err_status_cnt[32];
1756}
1757
1758static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1759 void *context, int vl, int mode, u64 data)
1760{
1761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1762
1763 return dd->cce_err_status_cnt[31];
1764}
1765
1766static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1767 void *context, int vl, int mode,
1768 u64 data)
1769{
1770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1771
1772 return dd->cce_err_status_cnt[30];
1773}
1774
1775static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1776 void *context, int vl, int mode,
1777 u64 data)
1778{
1779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1780
1781 return dd->cce_err_status_cnt[29];
1782}
1783
1784static u64 access_pcic_transmit_back_parity_err_cnt(
1785 const struct cntr_entry *entry,
1786 void *context, int vl, int mode, u64 data)
1787{
1788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1789
1790 return dd->cce_err_status_cnt[28];
1791}
1792
1793static u64 access_pcic_transmit_front_parity_err_cnt(
1794 const struct cntr_entry *entry,
1795 void *context, int vl, int mode, u64 data)
1796{
1797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1798
1799 return dd->cce_err_status_cnt[27];
1800}
1801
1802static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1803 void *context, int vl, int mode,
1804 u64 data)
1805{
1806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1807
1808 return dd->cce_err_status_cnt[26];
1809}
1810
1811static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1812 void *context, int vl, int mode,
1813 u64 data)
1814{
1815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1816
1817 return dd->cce_err_status_cnt[25];
1818}
1819
1820static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1821 void *context, int vl, int mode,
1822 u64 data)
1823{
1824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1825
1826 return dd->cce_err_status_cnt[24];
1827}
1828
1829static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1830 void *context, int vl, int mode,
1831 u64 data)
1832{
1833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1834
1835 return dd->cce_err_status_cnt[23];
1836}
1837
1838static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1839 void *context, int vl,
1840 int mode, u64 data)
1841{
1842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1843
1844 return dd->cce_err_status_cnt[22];
1845}
1846
1847static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1848 void *context, int vl, int mode,
1849 u64 data)
1850{
1851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1852
1853 return dd->cce_err_status_cnt[21];
1854}
1855
1856static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1857 const struct cntr_entry *entry,
1858 void *context, int vl, int mode, u64 data)
1859{
1860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862 return dd->cce_err_status_cnt[20];
1863}
1864
1865static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1866 void *context, int vl,
1867 int mode, u64 data)
1868{
1869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871 return dd->cce_err_status_cnt[19];
1872}
1873
1874static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1875 void *context, int vl, int mode,
1876 u64 data)
1877{
1878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880 return dd->cce_err_status_cnt[18];
1881}
1882
1883static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1884 void *context, int vl, int mode,
1885 u64 data)
1886{
1887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889 return dd->cce_err_status_cnt[17];
1890}
1891
1892static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1893 void *context, int vl, int mode,
1894 u64 data)
1895{
1896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898 return dd->cce_err_status_cnt[16];
1899}
1900
1901static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1902 void *context, int vl, int mode,
1903 u64 data)
1904{
1905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907 return dd->cce_err_status_cnt[15];
1908}
1909
1910static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1911 void *context, int vl,
1912 int mode, u64 data)
1913{
1914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916 return dd->cce_err_status_cnt[14];
1917}
1918
1919static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1920 void *context, int vl, int mode,
1921 u64 data)
1922{
1923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925 return dd->cce_err_status_cnt[13];
1926}
1927
1928static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1929 const struct cntr_entry *entry,
1930 void *context, int vl, int mode, u64 data)
1931{
1932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934 return dd->cce_err_status_cnt[12];
1935}
1936
1937static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1938 const struct cntr_entry *entry,
1939 void *context, int vl, int mode, u64 data)
1940{
1941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943 return dd->cce_err_status_cnt[11];
1944}
1945
1946static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
1947 const struct cntr_entry *entry,
1948 void *context, int vl, int mode, u64 data)
1949{
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952 return dd->cce_err_status_cnt[10];
1953}
1954
1955static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
1956 const struct cntr_entry *entry,
1957 void *context, int vl, int mode, u64 data)
1958{
1959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961 return dd->cce_err_status_cnt[9];
1962}
1963
1964static u64 access_cce_cli2_async_fifo_parity_err_cnt(
1965 const struct cntr_entry *entry,
1966 void *context, int vl, int mode, u64 data)
1967{
1968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970 return dd->cce_err_status_cnt[8];
1971}
1972
1973static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
1974 void *context, int vl,
1975 int mode, u64 data)
1976{
1977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979 return dd->cce_err_status_cnt[7];
1980}
1981
1982static u64 access_cce_cli0_async_fifo_parity_err_cnt(
1983 const struct cntr_entry *entry,
1984 void *context, int vl, int mode, u64 data)
1985{
1986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988 return dd->cce_err_status_cnt[6];
1989}
1990
1991static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
1992 void *context, int vl, int mode,
1993 u64 data)
1994{
1995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997 return dd->cce_err_status_cnt[5];
1998}
1999
2000static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2001 void *context, int vl, int mode,
2002 u64 data)
2003{
2004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006 return dd->cce_err_status_cnt[4];
2007}
2008
2009static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2010 const struct cntr_entry *entry,
2011 void *context, int vl, int mode, u64 data)
2012{
2013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015 return dd->cce_err_status_cnt[3];
2016}
2017
2018static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2019 void *context, int vl,
2020 int mode, u64 data)
2021{
2022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024 return dd->cce_err_status_cnt[2];
2025}
2026
2027static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2028 void *context, int vl,
2029 int mode, u64 data)
2030{
2031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033 return dd->cce_err_status_cnt[1];
2034}
2035
2036static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2037 void *context, int vl, int mode,
2038 u64 data)
2039{
2040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042 return dd->cce_err_status_cnt[0];
2043}
2044
2045/*
2046 * Software counters corresponding to each of the
2047 * error status bits within RcvErrStatus
2048 */
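/* rcv_err_status_cnt[] is likewise indexed by RcvErrStatus bit, 63 down to 0. */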
2049static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2050 void *context, int vl, int mode,
2051 u64 data)
2052{
2053 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2054
2055 return dd->rcv_err_status_cnt[63];
2056}
2057
2058static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2059 void *context, int vl,
2060 int mode, u64 data)
2061{
2062 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2063
2064 return dd->rcv_err_status_cnt[62];
2065}
2066
2067static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2068 void *context, int vl, int mode,
2069 u64 data)
2070{
2071 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2072
2073 return dd->rcv_err_status_cnt[61];
2074}
2075
2076static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2077 void *context, int vl, int mode,
2078 u64 data)
2079{
2080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2081
2082 return dd->rcv_err_status_cnt[60];
2083}
2084
2085static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2086 void *context, int vl,
2087 int mode, u64 data)
2088{
2089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2090
2091 return dd->rcv_err_status_cnt[59];
2092}
2093
2094static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2095 void *context, int vl,
2096 int mode, u64 data)
2097{
2098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2099
2100 return dd->rcv_err_status_cnt[58];
2101}
2102
2103static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2104 void *context, int vl, int mode,
2105 u64 data)
2106{
2107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2108
2109 return dd->rcv_err_status_cnt[57];
2110}
2111
2112static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2113 void *context, int vl, int mode,
2114 u64 data)
2115{
2116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2117
2118 return dd->rcv_err_status_cnt[56];
2119}
2120
2121static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2122 void *context, int vl, int mode,
2123 u64 data)
2124{
2125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127 return dd->rcv_err_status_cnt[55];
2128}
2129
2130static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2131 const struct cntr_entry *entry,
2132 void *context, int vl, int mode, u64 data)
2133{
2134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2135
2136 return dd->rcv_err_status_cnt[54];
2137}
2138
2139static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2140 const struct cntr_entry *entry,
2141 void *context, int vl, int mode, u64 data)
2142{
2143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2144
2145 return dd->rcv_err_status_cnt[53];
2146}
2147
2148static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2149 void *context, int vl,
2150 int mode, u64 data)
2151{
2152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2153
2154 return dd->rcv_err_status_cnt[52];
2155}
2156
2157static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2158 void *context, int vl,
2159 int mode, u64 data)
2160{
2161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2162
2163 return dd->rcv_err_status_cnt[51];
2164}
2165
2166static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2167 void *context, int vl,
2168 int mode, u64 data)
2169{
2170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2171
2172 return dd->rcv_err_status_cnt[50];
2173}
2174
2175static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2176 void *context, int vl,
2177 int mode, u64 data)
2178{
2179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2180
2181 return dd->rcv_err_status_cnt[49];
2182}
2183
2184static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2185 void *context, int vl,
2186 int mode, u64 data)
2187{
2188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2189
2190 return dd->rcv_err_status_cnt[48];
2191}
2192
2193static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2194 void *context, int vl,
2195 int mode, u64 data)
2196{
2197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2198
2199 return dd->rcv_err_status_cnt[47];
2200}
2201
2202static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2203 void *context, int vl, int mode,
2204 u64 data)
2205{
2206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2207
2208 return dd->rcv_err_status_cnt[46];
2209}
2210
2211static u64 access_rx_hq_intr_csr_parity_err_cnt(
2212 const struct cntr_entry *entry,
2213 void *context, int vl, int mode, u64 data)
2214{
2215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2216
2217 return dd->rcv_err_status_cnt[45];
2218}
2219
2220static u64 access_rx_lookup_csr_parity_err_cnt(
2221 const struct cntr_entry *entry,
2222 void *context, int vl, int mode, u64 data)
2223{
2224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226 return dd->rcv_err_status_cnt[44];
2227}
2228
2229static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2230 const struct cntr_entry *entry,
2231 void *context, int vl, int mode, u64 data)
2232{
2233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235 return dd->rcv_err_status_cnt[43];
2236}
2237
2238static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2239 const struct cntr_entry *entry,
2240 void *context, int vl, int mode, u64 data)
2241{
2242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244 return dd->rcv_err_status_cnt[42];
2245}
2246
2247static u64 access_rx_lookup_des_part2_parity_err_cnt(
2248 const struct cntr_entry *entry,
2249 void *context, int vl, int mode, u64 data)
2250{
2251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253 return dd->rcv_err_status_cnt[41];
2254}
2255
2256static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2257 const struct cntr_entry *entry,
2258 void *context, int vl, int mode, u64 data)
2259{
2260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262 return dd->rcv_err_status_cnt[40];
2263}
2264
2265static u64 access_rx_lookup_des_part1_unc_err_cnt(
2266 const struct cntr_entry *entry,
2267 void *context, int vl, int mode, u64 data)
2268{
2269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271 return dd->rcv_err_status_cnt[39];
2272}
2273
2274static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2275 const struct cntr_entry *entry,
2276 void *context, int vl, int mode, u64 data)
2277{
2278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280 return dd->rcv_err_status_cnt[38];
2281}
2282
2283static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2284 const struct cntr_entry *entry,
2285 void *context, int vl, int mode, u64 data)
2286{
2287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289 return dd->rcv_err_status_cnt[37];
2290}
2291
2292static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2293 const struct cntr_entry *entry,
2294 void *context, int vl, int mode, u64 data)
2295{
2296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298 return dd->rcv_err_status_cnt[36];
2299}
2300
2301static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2302 const struct cntr_entry *entry,
2303 void *context, int vl, int mode, u64 data)
2304{
2305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307 return dd->rcv_err_status_cnt[35];
2308}
2309
2310static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2311 const struct cntr_entry *entry,
2312 void *context, int vl, int mode, u64 data)
2313{
2314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316 return dd->rcv_err_status_cnt[34];
2317}
2318
2319static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2320 const struct cntr_entry *entry,
2321 void *context, int vl, int mode, u64 data)
2322{
2323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325 return dd->rcv_err_status_cnt[33];
2326}
2327
2328static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2329 void *context, int vl, int mode,
2330 u64 data)
2331{
2332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334 return dd->rcv_err_status_cnt[32];
2335}
2336
2337static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2338 void *context, int vl, int mode,
2339 u64 data)
2340{
2341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343 return dd->rcv_err_status_cnt[31];
2344}
2345
2346static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2347 void *context, int vl, int mode,
2348 u64 data)
2349{
2350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352 return dd->rcv_err_status_cnt[30];
2353}
2354
2355static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2356 void *context, int vl, int mode,
2357 u64 data)
2358{
2359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361 return dd->rcv_err_status_cnt[29];
2362}
2363
2364static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2365 void *context, int vl,
2366 int mode, u64 data)
2367{
2368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370 return dd->rcv_err_status_cnt[28];
2371}
2372
2373static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2374 const struct cntr_entry *entry,
2375 void *context, int vl, int mode, u64 data)
2376{
2377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379 return dd->rcv_err_status_cnt[27];
2380}
2381
2382static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2383 const struct cntr_entry *entry,
2384 void *context, int vl, int mode, u64 data)
2385{
2386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388 return dd->rcv_err_status_cnt[26];
2389}
2390
2391static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2392 const struct cntr_entry *entry,
2393 void *context, int vl, int mode, u64 data)
2394{
2395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397 return dd->rcv_err_status_cnt[25];
2398}
2399
2400static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2401 const struct cntr_entry *entry,
2402 void *context, int vl, int mode, u64 data)
2403{
2404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406 return dd->rcv_err_status_cnt[24];
2407}
2408
2409static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2410 const struct cntr_entry *entry,
2411 void *context, int vl, int mode, u64 data)
2412{
2413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415 return dd->rcv_err_status_cnt[23];
2416}
2417
2418static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2419 const struct cntr_entry *entry,
2420 void *context, int vl, int mode, u64 data)
2421{
2422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424 return dd->rcv_err_status_cnt[22];
2425}
2426
2427static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2428 const struct cntr_entry *entry,
2429 void *context, int vl, int mode, u64 data)
2430{
2431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433 return dd->rcv_err_status_cnt[21];
2434}
2435
2436static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2437 const struct cntr_entry *entry,
2438 void *context, int vl, int mode, u64 data)
2439{
2440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442 return dd->rcv_err_status_cnt[20];
2443}
2444
2445static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2446 const struct cntr_entry *entry,
2447 void *context, int vl, int mode, u64 data)
2448{
2449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451 return dd->rcv_err_status_cnt[19];
2452}
2453
2454static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2455 void *context, int vl,
2456 int mode, u64 data)
2457{
2458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460 return dd->rcv_err_status_cnt[18];
2461}
2462
2463static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2464 void *context, int vl,
2465 int mode, u64 data)
2466{
2467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469 return dd->rcv_err_status_cnt[17];
2470}
2471
2472static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2473 const struct cntr_entry *entry,
2474 void *context, int vl, int mode, u64 data)
2475{
2476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478 return dd->rcv_err_status_cnt[16];
2479}
2480
2481static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2482 const struct cntr_entry *entry,
2483 void *context, int vl, int mode, u64 data)
2484{
2485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487 return dd->rcv_err_status_cnt[15];
2488}
2489
2490static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2491 void *context, int vl,
2492 int mode, u64 data)
2493{
2494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496 return dd->rcv_err_status_cnt[14];
2497}
2498
2499static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2500 void *context, int vl,
2501 int mode, u64 data)
2502{
2503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505 return dd->rcv_err_status_cnt[13];
2506}
2507
2508static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2509 void *context, int vl, int mode,
2510 u64 data)
2511{
2512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514 return dd->rcv_err_status_cnt[12];
2515}
2516
2517static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2518 void *context, int vl, int mode,
2519 u64 data)
2520{
2521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523 return dd->rcv_err_status_cnt[11];
2524}
2525
2526static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2527 void *context, int vl, int mode,
2528 u64 data)
2529{
2530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532 return dd->rcv_err_status_cnt[10];
2533}
2534
2535static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2536 void *context, int vl, int mode,
2537 u64 data)
2538{
2539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541 return dd->rcv_err_status_cnt[9];
2542}
2543
2544static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2545 void *context, int vl, int mode,
2546 u64 data)
2547{
2548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550 return dd->rcv_err_status_cnt[8];
2551}
2552
2553static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2554 const struct cntr_entry *entry,
2555 void *context, int vl, int mode, u64 data)
2556{
2557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559 return dd->rcv_err_status_cnt[7];
2560}
2561
2562static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2563 const struct cntr_entry *entry,
2564 void *context, int vl, int mode, u64 data)
2565{
2566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568 return dd->rcv_err_status_cnt[6];
2569}
2570
2571static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2572 void *context, int vl, int mode,
2573 u64 data)
2574{
2575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577 return dd->rcv_err_status_cnt[5];
2578}
2579
2580static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2581 void *context, int vl, int mode,
2582 u64 data)
2583{
2584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586 return dd->rcv_err_status_cnt[4];
2587}
2588
2589static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2590 void *context, int vl, int mode,
2591 u64 data)
2592{
2593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595 return dd->rcv_err_status_cnt[3];
2596}
2597
2598static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2599 void *context, int vl, int mode,
2600 u64 data)
2601{
2602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604 return dd->rcv_err_status_cnt[2];
2605}
2606
2607static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2608 void *context, int vl, int mode,
2609 u64 data)
2610{
2611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613 return dd->rcv_err_status_cnt[1];
2614}
2615
2616static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2617 void *context, int vl, int mode,
2618 u64 data)
2619{
2620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622 return dd->rcv_err_status_cnt[0];
2623}
2624
2625/*
2626 * Software counters corresponding to each of the
2627 * error status bits within SendPioErrStatus
2628 */
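/* send_pio_err_status_cnt[] is indexed by SendPioErrStatus bit, 35 down to 0. */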
2629static u64 access_pio_pec_sop_head_parity_err_cnt(
2630 const struct cntr_entry *entry,
2631 void *context, int vl, int mode, u64 data)
2632{
2633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2634
2635 return dd->send_pio_err_status_cnt[35];
2636}
2637
2638static u64 access_pio_pcc_sop_head_parity_err_cnt(
2639 const struct cntr_entry *entry,
2640 void *context, int vl, int mode, u64 data)
2641{
2642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2643
2644 return dd->send_pio_err_status_cnt[34];
2645}
2646
2647static u64 access_pio_last_returned_cnt_parity_err_cnt(
2648 const struct cntr_entry *entry,
2649 void *context, int vl, int mode, u64 data)
2650{
2651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2652
2653 return dd->send_pio_err_status_cnt[33];
2654}
2655
2656static u64 access_pio_current_free_cnt_parity_err_cnt(
2657 const struct cntr_entry *entry,
2658 void *context, int vl, int mode, u64 data)
2659{
2660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2661
2662 return dd->send_pio_err_status_cnt[32];
2663}
2664
2665static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2666 void *context, int vl, int mode,
2667 u64 data)
2668{
2669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2670
2671 return dd->send_pio_err_status_cnt[31];
2672}
2673
2674static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2675 void *context, int vl, int mode,
2676 u64 data)
2677{
2678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2679
2680 return dd->send_pio_err_status_cnt[30];
2681}
2682
2683static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2684 void *context, int vl, int mode,
2685 u64 data)
2686{
2687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2688
2689 return dd->send_pio_err_status_cnt[29];
2690}
2691
2692static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2693 const struct cntr_entry *entry,
2694 void *context, int vl, int mode, u64 data)
2695{
2696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2697
2698 return dd->send_pio_err_status_cnt[28];
2699}
2700
2701static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2702 void *context, int vl, int mode,
2703 u64 data)
2704{
2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706
2707 return dd->send_pio_err_status_cnt[27];
2708}
2709
2710static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2711 void *context, int vl, int mode,
2712 u64 data)
2713{
2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2715
2716 return dd->send_pio_err_status_cnt[26];
2717}
2718
2719static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2720 void *context, int vl,
2721 int mode, u64 data)
2722{
2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2724
2725 return dd->send_pio_err_status_cnt[25];
2726}
2727
2728static u64 access_pio_block_qw_count_parity_err_cnt(
2729 const struct cntr_entry *entry,
2730 void *context, int vl, int mode, u64 data)
2731{
2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2733
2734 return dd->send_pio_err_status_cnt[24];
2735}
2736
2737static u64 access_pio_write_qw_valid_parity_err_cnt(
2738 const struct cntr_entry *entry,
2739 void *context, int vl, int mode, u64 data)
2740{
2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2742
2743 return dd->send_pio_err_status_cnt[23];
2744}
2745
2746static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2747 void *context, int vl, int mode,
2748 u64 data)
2749{
2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2751
2752 return dd->send_pio_err_status_cnt[22];
2753}
2754
2755static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2756 void *context, int vl,
2757 int mode, u64 data)
2758{
2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2760
2761 return dd->send_pio_err_status_cnt[21];
2762}
2763
2764static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2765 void *context, int vl,
2766 int mode, u64 data)
2767{
2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2769
2770 return dd->send_pio_err_status_cnt[20];
2771}
2772
2773static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2774 void *context, int vl,
2775 int mode, u64 data)
2776{
2777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2778
2779 return dd->send_pio_err_status_cnt[19];
2780}
2781
2782static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2783 const struct cntr_entry *entry,
2784 void *context, int vl, int mode, u64 data)
2785{
2786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2787
2788 return dd->send_pio_err_status_cnt[18];
2789}
2790
2791static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2792 void *context, int vl, int mode,
2793 u64 data)
2794{
2795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2796
2797 return dd->send_pio_err_status_cnt[17];
2798}
2799
2800static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2801 void *context, int vl, int mode,
2802 u64 data)
2803{
2804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806 return dd->send_pio_err_status_cnt[16];
2807}
2808
2809static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2810 const struct cntr_entry *entry,
2811 void *context, int vl, int mode, u64 data)
2812{
2813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815 return dd->send_pio_err_status_cnt[15];
2816}
2817
2818static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2819 const struct cntr_entry *entry,
2820 void *context, int vl, int mode, u64 data)
2821{
2822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824 return dd->send_pio_err_status_cnt[14];
2825}
2826
2827static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2828 const struct cntr_entry *entry,
2829 void *context, int vl, int mode, u64 data)
2830{
2831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833 return dd->send_pio_err_status_cnt[13];
2834}
2835
2836static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2837 const struct cntr_entry *entry,
2838 void *context, int vl, int mode, u64 data)
2839{
2840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842 return dd->send_pio_err_status_cnt[12];
2843}
2844
2845static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2846 const struct cntr_entry *entry,
2847 void *context, int vl, int mode, u64 data)
2848{
2849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851 return dd->send_pio_err_status_cnt[11];
2852}
2853
2854static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2855 const struct cntr_entry *entry,
2856 void *context, int vl, int mode, u64 data)
2857{
2858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860 return dd->send_pio_err_status_cnt[10];
2861}
2862
2863static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2864 const struct cntr_entry *entry,
2865 void *context, int vl, int mode, u64 data)
2866{
2867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869 return dd->send_pio_err_status_cnt[9];
2870}
2871
2872static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2873 const struct cntr_entry *entry,
2874 void *context, int vl, int mode, u64 data)
2875{
2876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878 return dd->send_pio_err_status_cnt[8];
2879}
2880
2881static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2882 const struct cntr_entry *entry,
2883 void *context, int vl, int mode, u64 data)
2884{
2885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887 return dd->send_pio_err_status_cnt[7];
2888}
2889
2890static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2891 void *context, int vl, int mode,
2892 u64 data)
2893{
2894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896 return dd->send_pio_err_status_cnt[6];
2897}
2898
2899static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2900 void *context, int vl, int mode,
2901 u64 data)
2902{
2903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905 return dd->send_pio_err_status_cnt[5];
2906}
2907
2908static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2909 void *context, int vl, int mode,
2910 u64 data)
2911{
2912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914 return dd->send_pio_err_status_cnt[4];
2915}
2916
2917static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2918 void *context, int vl, int mode,
2919 u64 data)
2920{
2921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923 return dd->send_pio_err_status_cnt[3];
2924}
2925
2926static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2927 void *context, int vl, int mode,
2928 u64 data)
2929{
2930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932 return dd->send_pio_err_status_cnt[2];
2933}
2934
2935static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2936 void *context, int vl,
2937 int mode, u64 data)
2938{
2939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941 return dd->send_pio_err_status_cnt[1];
2942}
2943
2944static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
2945 void *context, int vl, int mode,
2946 u64 data)
2947{
2948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950 return dd->send_pio_err_status_cnt[0];
2951}
2952
2953/*
2954 * Software counters corresponding to each of the
2955 * error status bits within SendDmaErrStatus
2956 */
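/* send_dma_err_status_cnt[] is indexed by SendDmaErrStatus bit, 3 down to 0. */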
2957static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
2958 const struct cntr_entry *entry,
2959 void *context, int vl, int mode, u64 data)
2960{
2961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2962
2963 return dd->send_dma_err_status_cnt[3];
2964}
2965
2966static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
2967 const struct cntr_entry *entry,
2968 void *context, int vl, int mode, u64 data)
2969{
2970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2971
2972 return dd->send_dma_err_status_cnt[2];
2973}
2974
2975static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
2976 void *context, int vl, int mode,
2977 u64 data)
2978{
2979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2980
2981 return dd->send_dma_err_status_cnt[1];
2982}
2983
2984static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
2985 void *context, int vl, int mode,
2986 u64 data)
2987{
2988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2989
2990 return dd->send_dma_err_status_cnt[0];
2991}
2992
2993/*
2994 * Software counters corresponding to each of the
2995 * error status bits within SendEgressErrStatus
2996 */
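/* send_egress_err_status_cnt[] is indexed by SendEgressErrStatus bit, from 63 downward. */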
2997static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
2998 const struct cntr_entry *entry,
2999 void *context, int vl, int mode, u64 data)
3000{
3001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3002
3003 return dd->send_egress_err_status_cnt[63];
3004}
3005
3006static u64 access_tx_read_sdma_memory_csr_err_cnt(
3007 const struct cntr_entry *entry,
3008 void *context, int vl, int mode, u64 data)
3009{
3010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3011
3012 return dd->send_egress_err_status_cnt[62];
3013}
3014
3015static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3016 void *context, int vl, int mode,
3017 u64 data)
3018{
3019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3020
3021 return dd->send_egress_err_status_cnt[61];
3022}
3023
3024static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3025 void *context, int vl,
3026 int mode, u64 data)
3027{
3028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3029
3030 return dd->send_egress_err_status_cnt[60];
3031}
3032
3033static u64 access_tx_read_sdma_memory_cor_err_cnt(
3034 const struct cntr_entry *entry,
3035 void *context, int vl, int mode, u64 data)
3036{
3037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3038
3039 return dd->send_egress_err_status_cnt[59];
3040}
3041
3042static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3043 void *context, int vl, int mode,
3044 u64 data)
3045{
3046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047
3048 return dd->send_egress_err_status_cnt[58];
3049}
3050
3051static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3052 void *context, int vl, int mode,
3053 u64 data)
3054{
3055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056
3057 return dd->send_egress_err_status_cnt[57];
3058}
3059
3060static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3061 void *context, int vl, int mode,
3062 u64 data)
3063{
3064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065
3066 return dd->send_egress_err_status_cnt[56];
3067}
3068
3069static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3070 void *context, int vl, int mode,
3071 u64 data)
3072{
3073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075 return dd->send_egress_err_status_cnt[55];
3076}
3077
3078static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3079 void *context, int vl, int mode,
3080 u64 data)
3081{
3082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084 return dd->send_egress_err_status_cnt[54];
3085}
3086
3087static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3088 void *context, int vl, int mode,
3089 u64 data)
3090{
3091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093 return dd->send_egress_err_status_cnt[53];
3094}
3095
3096static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3097 void *context, int vl, int mode,
3098 u64 data)
3099{
3100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102 return dd->send_egress_err_status_cnt[52];
3103}
3104
3105static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3106 void *context, int vl, int mode,
3107 u64 data)
3108{
3109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111 return dd->send_egress_err_status_cnt[51];
3112}
3113
3114static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3115 void *context, int vl, int mode,
3116 u64 data)
3117{
3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120 return dd->send_egress_err_status_cnt[50];
3121}
3122
3123static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3124 void *context, int vl, int mode,
3125 u64 data)
3126{
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129 return dd->send_egress_err_status_cnt[49];
3130}
3131
3132static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3133 void *context, int vl, int mode,
3134 u64 data)
3135{
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3137
3138 return dd->send_egress_err_status_cnt[48];
3139}
3140
3141static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3142 void *context, int vl, int mode,
3143 u64 data)
3144{
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3146
3147 return dd->send_egress_err_status_cnt[47];
3148}
3149
3150static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3151 void *context, int vl, int mode,
3152 u64 data)
3153{
3154 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3155
3156 return dd->send_egress_err_status_cnt[46];
3157}
3158
3159static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3160 void *context, int vl, int mode,
3161 u64 data)
3162{
3163 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3164
3165 return dd->send_egress_err_status_cnt[45];
3166}
3167
3168static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3169 void *context, int vl,
3170 int mode, u64 data)
3171{
3172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174 return dd->send_egress_err_status_cnt[44];
3175}
3176
3177static u64 access_tx_read_sdma_memory_unc_err_cnt(
3178 const struct cntr_entry *entry,
3179 void *context, int vl, int mode, u64 data)
3180{
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183 return dd->send_egress_err_status_cnt[43];
3184}
3185
3186static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3187 void *context, int vl, int mode,
3188 u64 data)
3189{
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192 return dd->send_egress_err_status_cnt[42];
3193}
3194
3195static u64 access_tx_credit_return_partiy_err_cnt(
3196 const struct cntr_entry *entry,
3197 void *context, int vl, int mode, u64 data)
3198{
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201 return dd->send_egress_err_status_cnt[41];
3202}
3203
3204static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3205 const struct cntr_entry *entry,
3206 void *context, int vl, int mode, u64 data)
3207{
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210 return dd->send_egress_err_status_cnt[40];
3211}
3212
3213static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3214 const struct cntr_entry *entry,
3215 void *context, int vl, int mode, u64 data)
3216{
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219 return dd->send_egress_err_status_cnt[39];
3220}
3221
3222static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3223 const struct cntr_entry *entry,
3224 void *context, int vl, int mode, u64 data)
3225{
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228 return dd->send_egress_err_status_cnt[38];
3229}
3230
3231static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3232 const struct cntr_entry *entry,
3233 void *context, int vl, int mode, u64 data)
3234{
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237 return dd->send_egress_err_status_cnt[37];
3238}
3239
3240static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3241 const struct cntr_entry *entry,
3242 void *context, int vl, int mode, u64 data)
3243{
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246 return dd->send_egress_err_status_cnt[36];
3247}
3248
3249static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3250 const struct cntr_entry *entry,
3251 void *context, int vl, int mode, u64 data)
3252{
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255 return dd->send_egress_err_status_cnt[35];
3256}
3257
3258static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3259 const struct cntr_entry *entry,
3260 void *context, int vl, int mode, u64 data)
3261{
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264 return dd->send_egress_err_status_cnt[34];
3265}
3266
3267static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3268 const struct cntr_entry *entry,
3269 void *context, int vl, int mode, u64 data)
3270{
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273 return dd->send_egress_err_status_cnt[33];
3274}
3275
3276static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3277 const struct cntr_entry *entry,
3278 void *context, int vl, int mode, u64 data)
3279{
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282 return dd->send_egress_err_status_cnt[32];
3283}
3284
3285static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3286 const struct cntr_entry *entry,
3287 void *context, int vl, int mode, u64 data)
3288{
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291 return dd->send_egress_err_status_cnt[31];
3292}
3293
3294static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3295 const struct cntr_entry *entry,
3296 void *context, int vl, int mode, u64 data)
3297{
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300 return dd->send_egress_err_status_cnt[30];
3301}
3302
3303static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3304 const struct cntr_entry *entry,
3305 void *context, int vl, int mode, u64 data)
3306{
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309 return dd->send_egress_err_status_cnt[29];
3310}
3311
3312static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3313 const struct cntr_entry *entry,
3314 void *context, int vl, int mode, u64 data)
3315{
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318 return dd->send_egress_err_status_cnt[28];
3319}
3320
3321static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3322 const struct cntr_entry *entry,
3323 void *context, int vl, int mode, u64 data)
3324{
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327 return dd->send_egress_err_status_cnt[27];
3328}
3329
3330static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3331 const struct cntr_entry *entry,
3332 void *context, int vl, int mode, u64 data)
3333{
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336 return dd->send_egress_err_status_cnt[26];
3337}
3338
3339static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3340 const struct cntr_entry *entry,
3341 void *context, int vl, int mode, u64 data)
3342{
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345 return dd->send_egress_err_status_cnt[25];
3346}
3347
3348static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3349 const struct cntr_entry *entry,
3350 void *context, int vl, int mode, u64 data)
3351{
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354 return dd->send_egress_err_status_cnt[24];
3355}
3356
3357static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3358 const struct cntr_entry *entry,
3359 void *context, int vl, int mode, u64 data)
3360{
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363 return dd->send_egress_err_status_cnt[23];
3364}
3365
3366static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3367 const struct cntr_entry *entry,
3368 void *context, int vl, int mode, u64 data)
3369{
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372 return dd->send_egress_err_status_cnt[22];
3373}
3374
3375static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3378{
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381 return dd->send_egress_err_status_cnt[21];
3382}
3383
3384static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3387{
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390 return dd->send_egress_err_status_cnt[20];
3391}
3392
3393static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3396{
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399 return dd->send_egress_err_status_cnt[19];
3400}
3401
3402static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3405{
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408 return dd->send_egress_err_status_cnt[18];
3409}
3410
3411static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3414{
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417 return dd->send_egress_err_status_cnt[17];
3418}
3419
3420static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3423{
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426 return dd->send_egress_err_status_cnt[16];
3427}
3428
3429static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3430 void *context, int vl, int mode,
3431 u64 data)
3432{
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435 return dd->send_egress_err_status_cnt[15];
3436}
3437
3438static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3439 void *context, int vl,
3440 int mode, u64 data)
3441{
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444 return dd->send_egress_err_status_cnt[14];
3445}
3446
3447static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3448 void *context, int vl, int mode,
3449 u64 data)
3450{
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453 return dd->send_egress_err_status_cnt[13];
3454}
3455
3456static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3457 void *context, int vl, int mode,
3458 u64 data)
3459{
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462 return dd->send_egress_err_status_cnt[12];
3463}
3464
3465static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3468{
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471 return dd->send_egress_err_status_cnt[11];
3472}
3473
3474static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3475 void *context, int vl, int mode,
3476 u64 data)
3477{
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480 return dd->send_egress_err_status_cnt[10];
3481}
3482
3483static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3484 void *context, int vl, int mode,
3485 u64 data)
3486{
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489 return dd->send_egress_err_status_cnt[9];
3490}
3491
3492static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3495{
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498 return dd->send_egress_err_status_cnt[8];
3499}
3500
3501static u64 access_tx_pio_launch_intf_parity_err_cnt(
3502 const struct cntr_entry *entry,
3503 void *context, int vl, int mode, u64 data)
3504{
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507 return dd->send_egress_err_status_cnt[7];
3508}
3509
3510static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3511 void *context, int vl, int mode,
3512 u64 data)
3513{
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516 return dd->send_egress_err_status_cnt[6];
3517}
3518
3519static u64 access_tx_incorrect_link_state_err_cnt(
3520 const struct cntr_entry *entry,
3521 void *context, int vl, int mode, u64 data)
3522{
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525 return dd->send_egress_err_status_cnt[5];
3526}
3527
3528static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3529 void *context, int vl, int mode,
3530 u64 data)
3531{
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534 return dd->send_egress_err_status_cnt[4];
3535}
3536
 3537static u64 access_tx_egress_fifo_underrun_or_parity_err_cnt(
3538 const struct cntr_entry *entry,
3539 void *context, int vl, int mode, u64 data)
3540{
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543 return dd->send_egress_err_status_cnt[3];
3544}
3545
3546static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3547 void *context, int vl, int mode,
3548 u64 data)
3549{
3550 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552 return dd->send_egress_err_status_cnt[2];
3553}
3554
3555static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3556 const struct cntr_entry *entry,
3557 void *context, int vl, int mode, u64 data)
3558{
3559 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561 return dd->send_egress_err_status_cnt[1];
3562}
3563
3564static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3565 const struct cntr_entry *entry,
3566 void *context, int vl, int mode, u64 data)
3567{
3568 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570 return dd->send_egress_err_status_cnt[0];
3571}
3572
3573/*
3574 * Software counters corresponding to each of the
3575 * error status bits within SendErrStatus
3576 */
3577static u64 access_send_csr_write_bad_addr_err_cnt(
3578 const struct cntr_entry *entry,
3579 void *context, int vl, int mode, u64 data)
3580{
3581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3582
3583 return dd->send_err_status_cnt[2];
3584}
3585
3586static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3587 void *context, int vl,
3588 int mode, u64 data)
3589{
3590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3591
3592 return dd->send_err_status_cnt[1];
3593}
3594
3595static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3596 void *context, int vl, int mode,
3597 u64 data)
3598{
3599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3600
3601 return dd->send_err_status_cnt[0];
3602}
3603
3604/*
3605 * Software counters corresponding to each of the
3606 * error status bits within SendCtxtErrStatus
3607 */
3608static u64 access_pio_write_out_of_bounds_err_cnt(
3609 const struct cntr_entry *entry,
3610 void *context, int vl, int mode, u64 data)
3611{
3612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3613
3614 return dd->sw_ctxt_err_status_cnt[4];
3615}
3616
3617static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3618 void *context, int vl, int mode,
3619 u64 data)
3620{
3621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3622
3623 return dd->sw_ctxt_err_status_cnt[3];
3624}
3625
3626static u64 access_pio_write_crosses_boundary_err_cnt(
3627 const struct cntr_entry *entry,
3628 void *context, int vl, int mode, u64 data)
3629{
3630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3631
3632 return dd->sw_ctxt_err_status_cnt[2];
3633}
3634
3635static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3636 void *context, int vl,
3637 int mode, u64 data)
3638{
3639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3640
3641 return dd->sw_ctxt_err_status_cnt[1];
3642}
3643
3644static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3645 void *context, int vl, int mode,
3646 u64 data)
3647{
3648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3649
3650 return dd->sw_ctxt_err_status_cnt[0];
3651}
3652
3653/*
3654 * Software counters corresponding to each of the
3655 * error status bits within SendDmaEngErrStatus
3656 */
3657static u64 access_sdma_header_request_fifo_cor_err_cnt(
3658 const struct cntr_entry *entry,
3659 void *context, int vl, int mode, u64 data)
3660{
3661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3662
3663 return dd->sw_send_dma_eng_err_status_cnt[23];
3664}
3665
3666static u64 access_sdma_header_storage_cor_err_cnt(
3667 const struct cntr_entry *entry,
3668 void *context, int vl, int mode, u64 data)
3669{
3670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3671
3672 return dd->sw_send_dma_eng_err_status_cnt[22];
3673}
3674
3675static u64 access_sdma_packet_tracking_cor_err_cnt(
3676 const struct cntr_entry *entry,
3677 void *context, int vl, int mode, u64 data)
3678{
3679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3680
3681 return dd->sw_send_dma_eng_err_status_cnt[21];
3682}
3683
3684static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3685 void *context, int vl, int mode,
3686 u64 data)
3687{
3688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3689
3690 return dd->sw_send_dma_eng_err_status_cnt[20];
3691}
3692
3693static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3694 void *context, int vl, int mode,
3695 u64 data)
3696{
3697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3698
3699 return dd->sw_send_dma_eng_err_status_cnt[19];
3700}
3701
3702static u64 access_sdma_header_request_fifo_unc_err_cnt(
3703 const struct cntr_entry *entry,
3704 void *context, int vl, int mode, u64 data)
3705{
3706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3707
3708 return dd->sw_send_dma_eng_err_status_cnt[18];
3709}
3710
3711static u64 access_sdma_header_storage_unc_err_cnt(
3712 const struct cntr_entry *entry,
3713 void *context, int vl, int mode, u64 data)
3714{
3715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3716
3717 return dd->sw_send_dma_eng_err_status_cnt[17];
3718}
3719
3720static u64 access_sdma_packet_tracking_unc_err_cnt(
3721 const struct cntr_entry *entry,
3722 void *context, int vl, int mode, u64 data)
3723{
3724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3725
3726 return dd->sw_send_dma_eng_err_status_cnt[16];
3727}
3728
3729static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3730 void *context, int vl, int mode,
3731 u64 data)
3732{
3733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734
3735 return dd->sw_send_dma_eng_err_status_cnt[15];
3736}
3737
3738static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3739 void *context, int vl, int mode,
3740 u64 data)
3741{
3742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3743
3744 return dd->sw_send_dma_eng_err_status_cnt[14];
3745}
3746
3747static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3748 void *context, int vl, int mode,
3749 u64 data)
3750{
3751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3752
3753 return dd->sw_send_dma_eng_err_status_cnt[13];
3754}
3755
3756static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3757 void *context, int vl, int mode,
3758 u64 data)
3759{
3760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762 return dd->sw_send_dma_eng_err_status_cnt[12];
3763}
3764
3765static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3766 void *context, int vl, int mode,
3767 u64 data)
3768{
3769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771 return dd->sw_send_dma_eng_err_status_cnt[11];
3772}
3773
3774static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3775 void *context, int vl, int mode,
3776 u64 data)
3777{
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780 return dd->sw_send_dma_eng_err_status_cnt[10];
3781}
3782
3783static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3784 void *context, int vl, int mode,
3785 u64 data)
3786{
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3788
3789 return dd->sw_send_dma_eng_err_status_cnt[9];
3790}
3791
3792static u64 access_sdma_packet_desc_overflow_err_cnt(
3793 const struct cntr_entry *entry,
3794 void *context, int vl, int mode, u64 data)
3795{
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3797
3798 return dd->sw_send_dma_eng_err_status_cnt[8];
3799}
3800
3801static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3802 void *context, int vl,
3803 int mode, u64 data)
3804{
3805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3806
3807 return dd->sw_send_dma_eng_err_status_cnt[7];
3808}
3809
3810static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3811 void *context, int vl, int mode, u64 data)
3812{
3813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3814
3815 return dd->sw_send_dma_eng_err_status_cnt[6];
3816}
3817
3818static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3819 void *context, int vl, int mode,
3820 u64 data)
3821{
3822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3823
3824 return dd->sw_send_dma_eng_err_status_cnt[5];
3825}
3826
3827static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3828 void *context, int vl, int mode,
3829 u64 data)
3830{
3831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3832
3833 return dd->sw_send_dma_eng_err_status_cnt[4];
3834}
3835
3836static u64 access_sdma_tail_out_of_bounds_err_cnt(
3837 const struct cntr_entry *entry,
3838 void *context, int vl, int mode, u64 data)
3839{
3840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3841
3842 return dd->sw_send_dma_eng_err_status_cnt[3];
3843}
3844
3845static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3846 void *context, int vl, int mode,
3847 u64 data)
3848{
3849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3850
3851 return dd->sw_send_dma_eng_err_status_cnt[2];
3852}
3853
3854static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3855 void *context, int vl, int mode,
3856 u64 data)
3857{
3858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3859
3860 return dd->sw_send_dma_eng_err_status_cnt[1];
3861}
3862
3863static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3864 void *context, int vl, int mode,
3865 u64 data)
3866{
3867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3868
3869 return dd->sw_send_dma_eng_err_status_cnt[0];
3870}
3871
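/*
 * def_access_sw_cpu() generates a per-port software counter accessor,
 * access_sw_cpu_<cntr>().  The generated function pairs the stored
 * baseline ibport_data.z_<cntr> with the per-CPU counter
 * ibport_data.<cntr> and hands both to read_write_cpu(), which performs
 * the read or write according to the requested mode.  For example,
 * def_access_sw_cpu(rc_acks) below defines access_sw_cpu_rc_acks().
 */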
3872#define def_access_sw_cpu(cntr) \
3873static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3874 void *context, int vl, int mode, u64 data) \
3875{ \
3876 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3877 return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr, \
3878 ppd->ibport_data.cntr, vl, \
3879 mode, data); \
3880}
3881
3882def_access_sw_cpu(rc_acks);
3883def_access_sw_cpu(rc_qacks);
3884def_access_sw_cpu(rc_delayed_comp);
3885
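/*
 * def_access_ibp_counter() generates access_ibp_<cntr>() for the plain
 * (non per-CPU) ibport software counters.  These counters are not kept
 * per VL, so any per-VL query returns 0; otherwise the request is passed
 * to read_write_sw() against ibport_data.n_<cntr>.  For example,
 * def_access_ibp_counter(rc_resends) below defines access_ibp_rc_resends(),
 * backed by ppd->ibport_data.n_rc_resends.
 */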
3886#define def_access_ibp_counter(cntr) \
3887static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3888 void *context, int vl, int mode, u64 data) \
3889{ \
3890 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3891 \
3892 if (vl != CNTR_INVALID_VL) \
3893 return 0; \
3894 \
3895 return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr, \
3896 mode, data); \
3897}
3898
3899def_access_ibp_counter(loop_pkts);
3900def_access_ibp_counter(rc_resends);
3901def_access_ibp_counter(rnr_naks);
3902def_access_ibp_counter(other_naks);
3903def_access_ibp_counter(rc_timeouts);
3904def_access_ibp_counter(pkt_drops);
3905def_access_ibp_counter(dmawait);
3906def_access_ibp_counter(rc_seqnak);
3907def_access_ibp_counter(rc_dupreq);
3908def_access_ibp_counter(rdma_seq);
3909def_access_ibp_counter(unaligned);
3910def_access_ibp_counter(seq_naks);
3911
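/*
 * Device-wide counter table, indexed by the C_* device counter enum.
 * Hardware-backed entries are declared with the RXE32/CCE/DC_PERF
 * element macros, which record the counter name, CSR source and flags;
 * the software and per-error-bit entries use CNTR_ELEM with one of the
 * access_* callbacks defined above as the read/write hook.
 */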
3912static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3913[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3914[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3915 CNTR_NORMAL),
3916[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3917 CNTR_NORMAL),
3918[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3919 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3920 CNTR_NORMAL),
3921[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3922 CNTR_NORMAL),
3923[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3924 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3925[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3926 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3927[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3928 CNTR_NORMAL),
3929[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3930 CNTR_NORMAL),
3931[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3932 CNTR_NORMAL),
3933[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3934 CNTR_NORMAL),
3935[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3936 CNTR_NORMAL),
3937[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3938 CNTR_NORMAL),
3939[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
3940 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
3941[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
3942 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
3943[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
3944 CNTR_SYNTH),
3945[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
3946[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
3947 CNTR_SYNTH),
3948[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
3949 CNTR_SYNTH),
3950[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
3951 CNTR_SYNTH),
3952[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
3953 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
3954[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
3955 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
3956 CNTR_SYNTH),
3957[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
3958 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
3959[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
3960 CNTR_SYNTH),
3961[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
3962 CNTR_SYNTH),
3963[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
3964 CNTR_SYNTH),
3965[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
3966 CNTR_SYNTH),
3967[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
3968 CNTR_SYNTH),
3969[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
3970 CNTR_SYNTH),
3971[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
3972 CNTR_SYNTH),
3973[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
3974 CNTR_SYNTH | CNTR_VL),
3975[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
3976 CNTR_SYNTH | CNTR_VL),
3977[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
3978[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
3979 CNTR_SYNTH | CNTR_VL),
3980[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
3981[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
3982 CNTR_SYNTH | CNTR_VL),
3983[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
3984 CNTR_SYNTH),
3985[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
3986 CNTR_SYNTH | CNTR_VL),
3987[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
3988 CNTR_SYNTH),
3989[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
3990 CNTR_SYNTH | CNTR_VL),
3991[C_DC_TOTAL_CRC] =
3992 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
3993 CNTR_SYNTH),
3994[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
3995 CNTR_SYNTH),
3996[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
3997 CNTR_SYNTH),
3998[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
3999 CNTR_SYNTH),
4000[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4001 CNTR_SYNTH),
4002[C_DC_CRC_MULT_LN] =
4003 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4004 CNTR_SYNTH),
4005[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4006 CNTR_SYNTH),
4007[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4008 CNTR_SYNTH),
4009[C_DC_SEQ_CRC_CNT] =
4010 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4011 CNTR_SYNTH),
4012[C_DC_ESC0_ONLY_CNT] =
4013 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4014 CNTR_SYNTH),
4015[C_DC_ESC0_PLUS1_CNT] =
4016 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4017 CNTR_SYNTH),
4018[C_DC_ESC0_PLUS2_CNT] =
4019 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4020 CNTR_SYNTH),
4021[C_DC_REINIT_FROM_PEER_CNT] =
4022 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4023 CNTR_SYNTH),
4024[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4025 CNTR_SYNTH),
4026[C_DC_MISC_FLG_CNT] =
4027 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4028 CNTR_SYNTH),
4029[C_DC_PRF_GOOD_LTP_CNT] =
4030 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4031[C_DC_PRF_ACCEPTED_LTP_CNT] =
4032 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4033 CNTR_SYNTH),
4034[C_DC_PRF_RX_FLIT_CNT] =
4035 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4036[C_DC_PRF_TX_FLIT_CNT] =
4037 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4038[C_DC_PRF_CLK_CNTR] =
4039 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4040[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4041 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4042[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4043 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4044 CNTR_SYNTH),
4045[C_DC_PG_STS_TX_SBE_CNT] =
4046 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4047[C_DC_PG_STS_TX_MBE_CNT] =
4048 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4049 CNTR_SYNTH),
4050[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4051 access_sw_cpu_intr),
4052[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4053 access_sw_cpu_rcv_limit),
4054[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4055 access_sw_vtx_wait),
4056[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4057 access_sw_pio_wait),
4058[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4059 access_sw_kmem_wait),
4060[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4061 access_sw_send_schedule),
4062/* MISC_ERR_STATUS */
4063[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4064 CNTR_NORMAL,
4065 access_misc_pll_lock_fail_err_cnt),
4066[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4067 CNTR_NORMAL,
4068 access_misc_mbist_fail_err_cnt),
4069[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4070 CNTR_NORMAL,
4071 access_misc_invalid_eep_cmd_err_cnt),
4072[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4073 CNTR_NORMAL,
4074 access_misc_efuse_done_parity_err_cnt),
4075[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4076 CNTR_NORMAL,
4077 access_misc_efuse_write_err_cnt),
4078[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4079 0, CNTR_NORMAL,
4080 access_misc_efuse_read_bad_addr_err_cnt),
4081[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4082 CNTR_NORMAL,
4083 access_misc_efuse_csr_parity_err_cnt),
4084[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4085 CNTR_NORMAL,
4086 access_misc_fw_auth_failed_err_cnt),
4087[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4088 CNTR_NORMAL,
4089 access_misc_key_mismatch_err_cnt),
4090[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4091 CNTR_NORMAL,
4092 access_misc_sbus_write_failed_err_cnt),
4093[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4094 CNTR_NORMAL,
4095 access_misc_csr_write_bad_addr_err_cnt),
4096[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4097 CNTR_NORMAL,
4098 access_misc_csr_read_bad_addr_err_cnt),
4099[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4100 CNTR_NORMAL,
4101 access_misc_csr_parity_err_cnt),
4102/* CceErrStatus */
4103[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4104 CNTR_NORMAL,
4105 access_sw_cce_err_status_aggregated_cnt),
4106[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4107 CNTR_NORMAL,
4108 access_cce_msix_csr_parity_err_cnt),
4109[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4110 CNTR_NORMAL,
4111 access_cce_int_map_unc_err_cnt),
4112[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4113 CNTR_NORMAL,
4114 access_cce_int_map_cor_err_cnt),
4115[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4116 CNTR_NORMAL,
4117 access_cce_msix_table_unc_err_cnt),
4118[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4119 CNTR_NORMAL,
4120 access_cce_msix_table_cor_err_cnt),
4121[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4122 0, CNTR_NORMAL,
4123 access_cce_rxdma_conv_fifo_parity_err_cnt),
4124[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4125 0, CNTR_NORMAL,
4126 access_cce_rcpl_async_fifo_parity_err_cnt),
4127[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4128 CNTR_NORMAL,
4129 access_cce_seg_write_bad_addr_err_cnt),
4130[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4131 CNTR_NORMAL,
4132 access_cce_seg_read_bad_addr_err_cnt),
4133[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4134 CNTR_NORMAL,
4135 access_la_triggered_cnt),
4136[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4137 CNTR_NORMAL,
4138 access_cce_trgt_cpl_timeout_err_cnt),
4139[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4140 CNTR_NORMAL,
4141 access_pcic_receive_parity_err_cnt),
4142[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4143 CNTR_NORMAL,
4144 access_pcic_transmit_back_parity_err_cnt),
4145[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4146 0, CNTR_NORMAL,
4147 access_pcic_transmit_front_parity_err_cnt),
4148[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4149 CNTR_NORMAL,
4150 access_pcic_cpl_dat_q_unc_err_cnt),
4151[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4152 CNTR_NORMAL,
4153 access_pcic_cpl_hd_q_unc_err_cnt),
4154[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4155 CNTR_NORMAL,
4156 access_pcic_post_dat_q_unc_err_cnt),
4157[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4158 CNTR_NORMAL,
4159 access_pcic_post_hd_q_unc_err_cnt),
4160[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4161 CNTR_NORMAL,
4162 access_pcic_retry_sot_mem_unc_err_cnt),
4163[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4164 CNTR_NORMAL,
4165 access_pcic_retry_mem_unc_err),
4166[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4167 CNTR_NORMAL,
4168 access_pcic_n_post_dat_q_parity_err_cnt),
4169[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4170 CNTR_NORMAL,
4171 access_pcic_n_post_h_q_parity_err_cnt),
4172[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4173 CNTR_NORMAL,
4174 access_pcic_cpl_dat_q_cor_err_cnt),
4175[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4176 CNTR_NORMAL,
4177 access_pcic_cpl_hd_q_cor_err_cnt),
4178[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4179 CNTR_NORMAL,
4180 access_pcic_post_dat_q_cor_err_cnt),
4181[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4182 CNTR_NORMAL,
4183 access_pcic_post_hd_q_cor_err_cnt),
4184[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4185 CNTR_NORMAL,
4186 access_pcic_retry_sot_mem_cor_err_cnt),
4187[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4188 CNTR_NORMAL,
4189 access_pcic_retry_mem_cor_err_cnt),
4190[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4191 "CceCli1AsyncFifoDbgParityError", 0, 0,
4192 CNTR_NORMAL,
4193 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4194[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4195 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4196 CNTR_NORMAL,
4197 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4198 ),
4199[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4200 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4201 CNTR_NORMAL,
4202 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4203[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4204 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4205 CNTR_NORMAL,
4206 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4207[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4208 0, CNTR_NORMAL,
4209 access_cce_cli2_async_fifo_parity_err_cnt),
4210[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4211 CNTR_NORMAL,
4212 access_cce_csr_cfg_bus_parity_err_cnt),
4213[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4214 0, CNTR_NORMAL,
4215 access_cce_cli0_async_fifo_parity_err_cnt),
4216[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4217 CNTR_NORMAL,
4218 access_cce_rspd_data_parity_err_cnt),
4219[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4220 CNTR_NORMAL,
4221 access_cce_trgt_access_err_cnt),
4222[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4223 0, CNTR_NORMAL,
4224 access_cce_trgt_async_fifo_parity_err_cnt),
4225[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4226 CNTR_NORMAL,
4227 access_cce_csr_write_bad_addr_err_cnt),
4228[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4229 CNTR_NORMAL,
4230 access_cce_csr_read_bad_addr_err_cnt),
4231[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4232 CNTR_NORMAL,
4233 access_ccs_csr_parity_err_cnt),
4234
4235/* RcvErrStatus */
4236[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_rx_csr_parity_err_cnt),
4239[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_rx_csr_write_bad_addr_err_cnt),
4242[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_rx_csr_read_bad_addr_err_cnt),
4245[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_rx_dma_csr_unc_err_cnt),
4248[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_rx_dma_dq_fsm_encoding_err_cnt),
4251[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_rx_dma_eq_fsm_encoding_err_cnt),
4254[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_rx_dma_csr_parity_err_cnt),
4257[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_rx_rbuf_data_cor_err_cnt),
4260[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_rx_rbuf_data_unc_err_cnt),
4263[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_rx_dma_data_fifo_rd_cor_err_cnt),
4266[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_rx_dma_data_fifo_rd_unc_err_cnt),
4269[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4272[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4275[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_rx_rbuf_desc_part2_cor_err_cnt),
4278[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4279 CNTR_NORMAL,
4280 access_rx_rbuf_desc_part2_unc_err_cnt),
4281[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4282 CNTR_NORMAL,
4283 access_rx_rbuf_desc_part1_cor_err_cnt),
4284[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4285 CNTR_NORMAL,
4286 access_rx_rbuf_desc_part1_unc_err_cnt),
4287[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4288 CNTR_NORMAL,
4289 access_rx_hq_intr_fsm_err_cnt),
4290[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4291 CNTR_NORMAL,
4292 access_rx_hq_intr_csr_parity_err_cnt),
4293[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4294 CNTR_NORMAL,
4295 access_rx_lookup_csr_parity_err_cnt),
4296[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4297 CNTR_NORMAL,
4298 access_rx_lookup_rcv_array_cor_err_cnt),
4299[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4300 CNTR_NORMAL,
4301 access_rx_lookup_rcv_array_unc_err_cnt),
4302[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4303 0, CNTR_NORMAL,
4304 access_rx_lookup_des_part2_parity_err_cnt),
4305[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4306 0, CNTR_NORMAL,
4307 access_rx_lookup_des_part1_unc_cor_err_cnt),
4308[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4309 CNTR_NORMAL,
4310 access_rx_lookup_des_part1_unc_err_cnt),
4311[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_rx_rbuf_next_free_buf_cor_err_cnt),
4314[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_rx_rbuf_next_free_buf_unc_err_cnt),
4317[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4318 "RxRbufFlInitWrAddrParityErr", 0, 0,
4319 CNTR_NORMAL,
4320 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4321[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4322 0, CNTR_NORMAL,
4323 access_rx_rbuf_fl_initdone_parity_err_cnt),
4324[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4325 0, CNTR_NORMAL,
4326 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4327[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4330[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_rx_rbuf_empty_err_cnt),
4333[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_rbuf_full_err_cnt),
4336[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rbuf_bad_lookup_err_cnt),
4339[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rbuf_ctx_id_parity_err_cnt),
4342[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rbuf_csr_qeopdw_parity_err_cnt),
4345[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4346 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4349[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4350 "RxRbufCsrQTlPtrParityErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4353[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4354 0, CNTR_NORMAL,
4355 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4356[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4357 0, CNTR_NORMAL,
4358 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4359[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4360 0, 0, CNTR_NORMAL,
4361 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4362[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4363 0, CNTR_NORMAL,
4364 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4365[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4366 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4369[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4370 0, CNTR_NORMAL,
4371 access_rx_rbuf_block_list_read_cor_err_cnt),
4372[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4373 0, CNTR_NORMAL,
4374 access_rx_rbuf_block_list_read_unc_err_cnt),
4375[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_rbuf_lookup_des_cor_err_cnt),
4378[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_rbuf_lookup_des_unc_err_cnt),
4381[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4382 "RxRbufLookupDesRegUncCorErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4385[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4388[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_rbuf_free_list_cor_err_cnt),
4391[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_rbuf_free_list_unc_err_cnt),
4394[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_rcv_fsm_encoding_err_cnt),
4397[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4398 CNTR_NORMAL,
4399 access_rx_dma_flag_cor_err_cnt),
4400[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4401 CNTR_NORMAL,
4402 access_rx_dma_flag_unc_err_cnt),
4403[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_dc_sop_eop_parity_err_cnt),
4406[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rcv_csr_parity_err_cnt),
4409[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rcv_qp_map_table_cor_err_cnt),
4412[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4413 CNTR_NORMAL,
4414 access_rx_rcv_qp_map_table_unc_err_cnt),
4415[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rx_rcv_data_cor_err_cnt),
4418[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rx_rcv_data_unc_err_cnt),
4421[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_rcv_hdr_cor_err_cnt),
4424[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4425 CNTR_NORMAL,
4426 access_rx_rcv_hdr_unc_err_cnt),
4427[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rx_dc_intf_parity_err_cnt),
4430[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rx_dma_csr_cor_err_cnt),
4433/* SendPioErrStatus */
4434[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_pio_pec_sop_head_parity_err_cnt),
4437[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_pio_pcc_sop_head_parity_err_cnt),
4440[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4441 0, 0, CNTR_NORMAL,
4442 access_pio_last_returned_cnt_parity_err_cnt),
4443[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4444 0, CNTR_NORMAL,
4445 access_pio_current_free_cnt_parity_err_cnt),
4446[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4447 CNTR_NORMAL,
4448 access_pio_reserved_31_err_cnt),
4449[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4450 CNTR_NORMAL,
4451 access_pio_reserved_30_err_cnt),
4452[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4453 CNTR_NORMAL,
4454 access_pio_ppmc_sop_len_err_cnt),
4455[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_pio_ppmc_bqc_mem_parity_err_cnt),
4458[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_pio_vl_fifo_parity_err_cnt),
4461[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_pio_vlf_sop_parity_err_cnt),
4464[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_pio_vlf_v1_len_parity_err_cnt),
4467[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_pio_block_qw_count_parity_err_cnt),
4470[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_pio_write_qw_valid_parity_err_cnt),
4473[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_pio_state_machine_err_cnt),
4476[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_pio_write_data_parity_err_cnt),
4479[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_pio_host_addr_mem_cor_err_cnt),
4482[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_pio_host_addr_mem_unc_err_cnt),
4485[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4488[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_pio_init_sm_in_err_cnt),
4491[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_pio_ppmc_pbl_fifo_err_cnt),
4494[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4495 0, CNTR_NORMAL,
4496 access_pio_credit_ret_fifo_parity_err_cnt),
4497[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_pio_v1_len_mem_bank1_cor_err_cnt),
4500[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_pio_v1_len_mem_bank0_cor_err_cnt),
4503[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_pio_v1_len_mem_bank1_unc_err_cnt),
4506[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_pio_v1_len_mem_bank0_unc_err_cnt),
4509[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_pio_sm_pkt_reset_parity_err_cnt),
4512[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_pio_pkt_evict_fifo_parity_err_cnt),
4515[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4516 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4519[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_pio_sbrdctl_crrel_parity_err_cnt),
4522[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_pio_pec_fifo_parity_err_cnt),
4525[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_pio_pcc_fifo_parity_err_cnt),
4528[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4529 CNTR_NORMAL,
4530 access_pio_sb_mem_fifo1_err_cnt),
4531[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4532 CNTR_NORMAL,
4533 access_pio_sb_mem_fifo0_err_cnt),
4534[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_csr_parity_err_cnt),
4537[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_pio_write_addr_parity_err_cnt),
4540[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_pio_write_bad_ctxt_err_cnt),
4543/* SendDmaErrStatus */
4544[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4545 0, CNTR_NORMAL,
4546 access_sdma_pcie_req_tracking_cor_err_cnt),
4547[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4548 0, CNTR_NORMAL,
4549 access_sdma_pcie_req_tracking_unc_err_cnt),
4550[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_sdma_csr_parity_err_cnt),
4553[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_sdma_rpy_tag_err_cnt),
4556/* SendEgressErrStatus */
4557[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_tx_read_pio_memory_csr_unc_err_cnt),
4560[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4561 0, CNTR_NORMAL,
4562 access_tx_read_sdma_memory_csr_err_cnt),
4563[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_tx_egress_fifo_cor_err_cnt),
4566[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_tx_read_pio_memory_cor_err_cnt),
4569[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4570 CNTR_NORMAL,
4571 access_tx_read_sdma_memory_cor_err_cnt),
4572[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_tx_sb_hdr_cor_err_cnt),
4575[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_tx_credit_overrun_err_cnt),
4578[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_tx_launch_fifo8_cor_err_cnt),
4581[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_tx_launch_fifo7_cor_err_cnt),
4584[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_tx_launch_fifo6_cor_err_cnt),
4587[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_tx_launch_fifo5_cor_err_cnt),
4590[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4591 CNTR_NORMAL,
4592 access_tx_launch_fifo4_cor_err_cnt),
4593[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_tx_launch_fifo3_cor_err_cnt),
4596[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_tx_launch_fifo2_cor_err_cnt),
4599[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4600 CNTR_NORMAL,
4601 access_tx_launch_fifo1_cor_err_cnt),
4602[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4603 CNTR_NORMAL,
4604 access_tx_launch_fifo0_cor_err_cnt),
4605[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_tx_credit_return_vl_err_cnt),
4608[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_tx_hcrc_insertion_err_cnt),
4611[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_tx_egress_fifo_unc_err_cnt),
4614[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_tx_read_pio_memory_unc_err_cnt),
4617[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_tx_read_sdma_memory_unc_err_cnt),
4620[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_tx_sb_hdr_unc_err_cnt),
4623[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4624 CNTR_NORMAL,
 4625 access_tx_credit_return_parity_err_cnt),
4626[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4627 0, 0, CNTR_NORMAL,
4628 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4629[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4630 0, 0, CNTR_NORMAL,
4631 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4632[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4633 0, 0, CNTR_NORMAL,
4634 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4635[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4636 0, 0, CNTR_NORMAL,
4637 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4638[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4639 0, 0, CNTR_NORMAL,
4640 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4641[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4642 0, 0, CNTR_NORMAL,
4643 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4644[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4645 0, 0, CNTR_NORMAL,
4646 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4647[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4648 0, 0, CNTR_NORMAL,
4649 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4650[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4651 0, 0, CNTR_NORMAL,
4652 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4653[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4654 0, 0, CNTR_NORMAL,
4655 access_tx_sdma15_disallowed_packet_err_cnt),
4656[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4657 0, 0, CNTR_NORMAL,
4658 access_tx_sdma14_disallowed_packet_err_cnt),
4659[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4660 0, 0, CNTR_NORMAL,
4661 access_tx_sdma13_disallowed_packet_err_cnt),
4662[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4663 0, 0, CNTR_NORMAL,
4664 access_tx_sdma12_disallowed_packet_err_cnt),
4665[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4666 0, 0, CNTR_NORMAL,
4667 access_tx_sdma11_disallowed_packet_err_cnt),
4668[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4669 0, 0, CNTR_NORMAL,
4670 access_tx_sdma10_disallowed_packet_err_cnt),
4671[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4672 0, 0, CNTR_NORMAL,
4673 access_tx_sdma9_disallowed_packet_err_cnt),
4674[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4675 0, 0, CNTR_NORMAL,
4676 access_tx_sdma8_disallowed_packet_err_cnt),
4677[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4678 0, 0, CNTR_NORMAL,
4679 access_tx_sdma7_disallowed_packet_err_cnt),
4680[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4681 0, 0, CNTR_NORMAL,
4682 access_tx_sdma6_disallowed_packet_err_cnt),
4683[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4684 0, 0, CNTR_NORMAL,
4685 access_tx_sdma5_disallowed_packet_err_cnt),
4686[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4687 0, 0, CNTR_NORMAL,
4688 access_tx_sdma4_disallowed_packet_err_cnt),
4689[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4690 0, 0, CNTR_NORMAL,
4691 access_tx_sdma3_disallowed_packet_err_cnt),
4692[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4693 0, 0, CNTR_NORMAL,
4694 access_tx_sdma2_disallowed_packet_err_cnt),
4695[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4696 0, 0, CNTR_NORMAL,
4697 access_tx_sdma1_disallowed_packet_err_cnt),
4698[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4699 0, 0, CNTR_NORMAL,
4700 access_tx_sdma0_disallowed_packet_err_cnt),
4701[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4702 CNTR_NORMAL,
4703 access_tx_config_parity_err_cnt),
4704[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_tx_sbrd_ctl_csr_parity_err_cnt),
4707[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_tx_launch_csr_parity_err_cnt),
 4710[C_TX_ILLEGAL_VL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_tx_illegal_vl_err_cnt),
4713[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4714 "TxSbrdCtlStateMachineParityErr", 0, 0,
4715 CNTR_NORMAL,
4716 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4717[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4718 CNTR_NORMAL,
4719 access_egress_reserved_10_err_cnt),
4720[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4721 CNTR_NORMAL,
4722 access_egress_reserved_9_err_cnt),
4723[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_sdma_launch_intf_parity_err_cnt),
4726[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4727 CNTR_NORMAL,
4728 access_tx_pio_launch_intf_parity_err_cnt),
4729[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4730 CNTR_NORMAL,
4731 access_egress_reserved_6_err_cnt),
4732[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4733 CNTR_NORMAL,
4734 access_tx_incorrect_link_state_err_cnt),
4735[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4736 CNTR_NORMAL,
4737 access_tx_linkdown_err_cnt),
4738[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4739 "EgressFifoUnderrunOrParityErr", 0, 0,
4740 CNTR_NORMAL,
4741 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4742[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4743 CNTR_NORMAL,
4744 access_egress_reserved_2_err_cnt),
4745[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4746 CNTR_NORMAL,
4747 access_tx_pkt_integrity_mem_unc_err_cnt),
4748[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4749 CNTR_NORMAL,
4750 access_tx_pkt_integrity_mem_cor_err_cnt),
4751/* SendErrStatus */
4752[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4753 CNTR_NORMAL,
4754 access_send_csr_write_bad_addr_err_cnt),
4755[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4756 CNTR_NORMAL,
4757 access_send_csr_read_bad_addr_err_cnt),
4758[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4759 CNTR_NORMAL,
4760 access_send_csr_parity_cnt),
4761/* SendCtxtErrStatus */
4762[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4763 CNTR_NORMAL,
4764 access_pio_write_out_of_bounds_err_cnt),
4765[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4766 CNTR_NORMAL,
4767 access_pio_write_overflow_err_cnt),
4768[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4769 0, 0, CNTR_NORMAL,
4770 access_pio_write_crosses_boundary_err_cnt),
4771[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4772 CNTR_NORMAL,
4773 access_pio_disallowed_packet_err_cnt),
4774[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4775 CNTR_NORMAL,
4776 access_pio_inconsistent_sop_err_cnt),
4777/* SendDmaEngErrStatus */
4778[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4779 0, 0, CNTR_NORMAL,
4780 access_sdma_header_request_fifo_cor_err_cnt),
4781[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4782 CNTR_NORMAL,
4783 access_sdma_header_storage_cor_err_cnt),
4784[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4785 CNTR_NORMAL,
4786 access_sdma_packet_tracking_cor_err_cnt),
4787[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4788 CNTR_NORMAL,
4789 access_sdma_assembly_cor_err_cnt),
4790[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_sdma_desc_table_cor_err_cnt),
4793[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4794 0, 0, CNTR_NORMAL,
4795 access_sdma_header_request_fifo_unc_err_cnt),
4796[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4797 CNTR_NORMAL,
4798 access_sdma_header_storage_unc_err_cnt),
4799[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4800 CNTR_NORMAL,
4801 access_sdma_packet_tracking_unc_err_cnt),
4802[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_sdma_assembly_unc_err_cnt),
4805[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4806 CNTR_NORMAL,
4807 access_sdma_desc_table_unc_err_cnt),
4808[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4809 CNTR_NORMAL,
4810 access_sdma_timeout_err_cnt),
4811[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_sdma_header_length_err_cnt),
4814[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_sdma_header_address_err_cnt),
4817[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4818 CNTR_NORMAL,
4819 access_sdma_header_select_err_cnt),
4820[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4821 CNTR_NORMAL,
4822 access_sdma_reserved_9_err_cnt),
4823[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_sdma_packet_desc_overflow_err_cnt),
4826[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4827 CNTR_NORMAL,
4828 access_sdma_length_mismatch_err_cnt),
4829[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_sdma_halt_err_cnt),
4832[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_sdma_mem_read_err_cnt),
4835[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_sdma_first_desc_err_cnt),
4838[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4839 CNTR_NORMAL,
4840 access_sdma_tail_out_of_bounds_err_cnt),
4841[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_sdma_too_long_err_cnt),
4844[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4845 CNTR_NORMAL,
4846 access_sdma_gen_mismatch_err_cnt),
4847[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_sdma_wrong_dw_err_cnt),
 4850};
4851
4852static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4853[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4854 CNTR_NORMAL),
4855[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4856 CNTR_NORMAL),
4857[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4858 CNTR_NORMAL),
4859[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4860 CNTR_NORMAL),
4861[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4862 CNTR_NORMAL),
4863[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4864 CNTR_NORMAL),
4865[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4866 CNTR_NORMAL),
4867[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4868[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4869[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4870[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4871 CNTR_SYNTH | CNTR_VL),
4872[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4873 CNTR_SYNTH | CNTR_VL),
4874[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4875 CNTR_SYNTH | CNTR_VL),
4876[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4877[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4878[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4879 access_sw_link_dn_cnt),
4880[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4881 access_sw_link_up_cnt),
4882[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4883 access_sw_xmit_discards),
4884[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4885 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4886 access_sw_xmit_discards),
4887[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4888 access_xmit_constraint_errs),
4889[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4890 access_rcv_constraint_errs),
4891[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4892[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4893[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4894[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4895[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4896[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4897[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4898[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4899[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4900[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4901[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4902[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4903[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4904 access_sw_cpu_rc_acks),
4905[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4906 access_sw_cpu_rc_qacks),
4907[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4908 access_sw_cpu_rc_delayed_comp),
4909[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4910[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4911[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4912[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4913[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4914[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4915[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4916[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4917[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4918[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4919[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4920[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4921[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
4922[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
4923[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
4924[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
4925[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
4926[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
4927[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
4928[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
4929[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
4930[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
4931[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
4932[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
4933[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
4934[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
4935[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
4936[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
4937[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
4938[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
4939[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
4940[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
4941[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
4942[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
4943[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
4944[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
4945[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
4946[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
4947[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
4948[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
4949[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
4950[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
4951[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
4952[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
4953[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
4954[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
4955[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
4956[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
4957[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
4958[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
4959[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
4960[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
4961[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
4962[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
4963[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
4964[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
4965[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
4966[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
4967[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
4968[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
4969[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
4970[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
4971[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
4972[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
4973[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
4974[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
4975[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
4976[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
4977[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
4978[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
4979[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
4980[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
4981[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
4982[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
4983[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
4984[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
4985[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
4986[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
4987[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
4988[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
4989};
4990
4991/* ======================================================================== */
4992
 4993/* return true if this is chip revision A */
4994int is_ax(struct hfi1_devdata *dd)
4995{
4996 u8 chip_rev_minor =
4997 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
4998 & CCE_REVISION_CHIP_REV_MINOR_MASK;
4999 return (chip_rev_minor & 0xf0) == 0;
5000}
5001
 5002/* return true if this is chip revision B */
5003int is_bx(struct hfi1_devdata *dd)
5004{
5005 u8 chip_rev_minor =
5006 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5007 & CCE_REVISION_CHIP_REV_MINOR_MASK;
 5008 return (chip_rev_minor & 0xF0) == 0x10;
 5009}
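/*
 * Illustrative reading of the two checks above: the upper nibble of the
 * CCE_REVISION minor field encodes the chip step, so a minor revision of
 * 0x00-0x0f reports as A-step (is_ax) and 0x10-0x1f as B-step (is_bx).
 */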
5010
5011/*
5012 * Append string s to buffer buf. Arguments curp and len are the current
5013 * position and remaining length, respectively.
5014 *
5015 * return 0 on success, 1 on out of room
5016 */
5017static int append_str(char *buf, char **curp, int *lenp, const char *s)
5018{
5019 char *p = *curp;
5020 int len = *lenp;
5021 int result = 0; /* success */
5022 char c;
5023
 5024 /* add a comma if this is not the first string in the buffer */
5025 if (p != buf) {
5026 if (len == 0) {
5027 result = 1; /* out of room */
5028 goto done;
5029 }
5030 *p++ = ',';
5031 len--;
5032 }
5033
5034 /* copy the string */
5035 while ((c = *s++) != 0) {
5036 if (len == 0) {
5037 result = 1; /* out of room */
5038 goto done;
5039 }
5040 *p++ = c;
5041 len--;
5042 }
5043
5044done:
5045 /* write return values */
5046 *curp = p;
5047 *lenp = len;
5048
5049 return result;
5050}
5051
5052/*
5053 * Using the given flag table, print a comma separated string into
5054 * the buffer. End in '*' if the buffer is too short.
5055 */
5056static char *flag_string(char *buf, int buf_len, u64 flags,
5057 struct flag_table *table, int table_size)
5058{
5059 char extra[32];
5060 char *p = buf;
5061 int len = buf_len;
5062 int no_room = 0;
5063 int i;
5064
 5065 /* make sure there are at least 2 bytes so we can form "*" */
5066 if (len < 2)
5067 return "";
5068
5069 len--; /* leave room for a nul */
5070 for (i = 0; i < table_size; i++) {
5071 if (flags & table[i].flag) {
5072 no_room = append_str(buf, &p, &len, table[i].str);
5073 if (no_room)
5074 break;
5075 flags &= ~table[i].flag;
5076 }
5077 }
5078
5079 /* any undocumented bits left? */
5080 if (!no_room && flags) {
5081 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5082 no_room = append_str(buf, &p, &len, extra);
5083 }
5084
5085 /* add * if ran out of room */
5086 if (no_room) {
5087 /* may need to back up to add space for a '*' */
5088 if (len == 0)
5089 --p;
5090 *p++ = '*';
5091 }
5092
5093 /* add final nul - space already allocated above */
5094 *p = 0;
5095 return buf;
5096}
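/*
 * Example of the resulting format (illustrative only, assuming a table
 * containing { 0x1, "CceCsrParityErr" } and { 0x2, "CceCsrReadBadAddrErr" }):
 * flags of 0x7 would decode to "CceCsrParityErr,CceCsrReadBadAddrErr,bits 0x4",
 * with a trailing '*' substituted for the tail if the buffer runs out of room.
 */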
5097
5098/* first 8 CCE error interrupt source names */
5099static const char * const cce_misc_names[] = {
5100 "CceErrInt", /* 0 */
5101 "RxeErrInt", /* 1 */
5102 "MiscErrInt", /* 2 */
5103 "Reserved3", /* 3 */
5104 "PioErrInt", /* 4 */
5105 "SDmaErrInt", /* 5 */
5106 "EgressErrInt", /* 6 */
5107 "TxeErrInt" /* 7 */
5108};
5109
5110/*
5111 * Return the miscellaneous error interrupt name.
5112 */
5113static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5114{
5115 if (source < ARRAY_SIZE(cce_misc_names))
5116 strncpy(buf, cce_misc_names[source], bsize);
5117 else
5118 snprintf(buf,
5119 bsize,
5120 "Reserved%u",
5121 source + IS_GENERAL_ERR_START);
5122
5123 return buf;
5124}
5125
5126/*
5127 * Return the SDMA engine error interrupt name.
5128 */
5129static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5130{
5131 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5132 return buf;
5133}
5134
5135/*
5136 * Return the send context error interrupt name.
5137 */
5138static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5139{
5140 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5141 return buf;
5142}
5143
5144static const char * const various_names[] = {
5145 "PbcInt",
5146 "GpioAssertInt",
5147 "Qsfp1Int",
5148 "Qsfp2Int",
5149 "TCritInt"
5150};
5151
5152/*
5153 * Return the various interrupt name.
5154 */
5155static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5156{
5157 if (source < ARRAY_SIZE(various_names))
5158 strncpy(buf, various_names[source], bsize);
5159 else
5160 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5161 return buf;
5162}
5163
5164/*
5165 * Return the DC interrupt name.
5166 */
5167static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5168{
5169 static const char * const dc_int_names[] = {
5170 "common",
5171 "lcb",
5172 "8051",
5173 "lbm" /* local block merge */
5174 };
5175
5176 if (source < ARRAY_SIZE(dc_int_names))
5177 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5178 else
5179 snprintf(buf, bsize, "DCInt%u", source);
5180 return buf;
5181}
5182
5183static const char * const sdma_int_names[] = {
5184 "SDmaInt",
5185 "SdmaIdleInt",
5186 "SdmaProgressInt",
5187};
5188
5189/*
5190 * Return the SDMA engine interrupt name.
5191 */
5192static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5193{
5194 /* what interrupt */
5195 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5196 /* which engine */
5197 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5198
5199 if (likely(what < 3))
5200 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5201 else
5202 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5203 return buf;
5204}
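/*
 * Worked example (assuming TXE_NUM_SDMA_ENGINES is 16): source 17 gives
 * what = 1 and which = 1, i.e. "SdmaIdleInt1"; any source with what >= 3
 * falls through to the "Invalid SDMA interrupt" string.
 */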
5205
5206/*
5207 * Return the receive available interrupt name.
5208 */
5209static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5210{
5211 snprintf(buf, bsize, "RcvAvailInt%u", source);
5212 return buf;
5213}
5214
5215/*
5216 * Return the receive urgent interrupt name.
5217 */
5218static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5219{
5220 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5221 return buf;
5222}
5223
5224/*
5225 * Return the send credit interrupt name.
5226 */
5227static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5228{
5229 snprintf(buf, bsize, "SendCreditInt%u", source);
5230 return buf;
5231}
5232
5233/*
5234 * Return the reserved interrupt name.
5235 */
5236static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5237{
5238 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5239 return buf;
5240}
5241
5242static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5243{
5244 return flag_string(buf, buf_len, flags,
5245 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5246}
5247
5248static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5249{
5250 return flag_string(buf, buf_len, flags,
5251 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5252}
5253
5254static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5255{
5256 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5257 ARRAY_SIZE(misc_err_status_flags));
5258}
5259
5260static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5261{
5262 return flag_string(buf, buf_len, flags,
5263 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5264}
5265
5266static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5267{
5268 return flag_string(buf, buf_len, flags,
5269 sdma_err_status_flags,
5270 ARRAY_SIZE(sdma_err_status_flags));
5271}
5272
5273static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5274{
5275 return flag_string(buf, buf_len, flags,
5276 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5277}
5278
5279static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5280{
5281 return flag_string(buf, buf_len, flags,
5282 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5283}
5284
5285static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5286{
5287 return flag_string(buf, buf_len, flags,
5288 send_err_status_flags,
5289 ARRAY_SIZE(send_err_status_flags));
5290}
5291
5292static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5293{
5294 char buf[96];
 5295 int i = 0;
 5296
5297 /*
5298 * For most these errors, there is nothing that can be done except
5299 * report or record it.
5300 */
5301 dd_dev_info(dd, "CCE Error: %s\n",
5302 cce_err_status_string(buf, sizeof(buf), reg));
5303
 5304 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5305 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
 5306 /* this error requires a manual drop into SPC freeze mode */
5307 /* then a fix up */
5308 start_freeze_handling(dd->pport, FREEZE_SELF);
5309 }
 5310
5311 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5312 if (reg & (1ull << i)) {
5313 incr_cntr64(&dd->cce_err_status_cnt[i]);
5314 /* maintain a counter over all cce_err_status errors */
5315 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5316 }
5317 }
 5318}
5319
5320/*
5321 * Check counters for receive errors that do not have an interrupt
5322 * associated with them.
5323 */
5324#define RCVERR_CHECK_TIME 10
5325static void update_rcverr_timer(unsigned long opaque)
5326{
5327 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5328 struct hfi1_pportdata *ppd = dd->pport;
5329 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5330
5331 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5332 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5333 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5334 set_link_down_reason(ppd,
5335 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5336 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5337 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5338 }
5339 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5340
5341 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5342}
5343
5344static int init_rcverr(struct hfi1_devdata *dd)
5345{
 5346 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
 5347 /* Assume the hardware counter has been reset */
5348 dd->rcv_ovfl_cnt = 0;
5349 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5350}
5351
5352static void free_rcverr(struct hfi1_devdata *dd)
5353{
5354 if (dd->rcverr_timer.data)
5355 del_timer_sync(&dd->rcverr_timer);
5356 dd->rcverr_timer.data = 0;
5357}
5358
5359static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5360{
5361 char buf[96];
 5362 int i = 0;
 5363
5364 dd_dev_info(dd, "Receive Error: %s\n",
5365 rxe_err_status_string(buf, sizeof(buf), reg));
5366
5367 if (reg & ALL_RXE_FREEZE_ERR) {
5368 int flags = 0;
5369
5370 /*
5371 * Freeze mode recovery is disabled for the errors
5372 * in RXE_FREEZE_ABORT_MASK
5373 */
 5374 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
 5375 flags = FREEZE_ABORT;
5376
5377 start_freeze_handling(dd->pport, flags);
5378 }
 5379
5380 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5381 if (reg & (1ull << i))
5382 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5383 }
 5384}
5385
5386static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5387{
5388 char buf[96];
 5389 int i = 0;
 5390
5391 dd_dev_info(dd, "Misc Error: %s",
5392 misc_err_status_string(buf, sizeof(buf), reg));
 5393 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5394 if (reg & (1ull << i))
5395 incr_cntr64(&dd->misc_err_status_cnt[i]);
5396 }
 5397}
5398
5399static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5400{
5401 char buf[96];
 5402 int i = 0;
 5403
5404 dd_dev_info(dd, "PIO Error: %s\n",
5405 pio_err_status_string(buf, sizeof(buf), reg));
5406
5407 if (reg & ALL_PIO_FREEZE_ERR)
5408 start_freeze_handling(dd->pport, 0);
 5409
5410 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5411 if (reg & (1ull << i))
5412 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5413 }
 5414}
5415
5416static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5417{
5418 char buf[96];
 5419 int i = 0;
 5420
5421 dd_dev_info(dd, "SDMA Error: %s\n",
5422 sdma_err_status_string(buf, sizeof(buf), reg));
5423
5424 if (reg & ALL_SDMA_FREEZE_ERR)
5425 start_freeze_handling(dd->pport, 0);
 5426
5427 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5428 if (reg & (1ull << i))
5429 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5430 }
 5431}
5432
5433static void count_port_inactive(struct hfi1_devdata *dd)
5434{
5435 struct hfi1_pportdata *ppd = dd->pport;
5436
5437 if (ppd->port_xmit_discards < ~(u64)0)
5438 ppd->port_xmit_discards++;
5439}
5440
5441/*
5442 * We have had a "disallowed packet" error during egress. Determine the
5443 * integrity check which failed, and update relevant error counter, etc.
5444 *
5445 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5446 * bit of state per integrity check, and so we can miss the reason for an
5447 * egress error if more than one packet fails the same integrity check
5448 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5449 */
5450static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5451{
5452 struct hfi1_pportdata *ppd = dd->pport;
5453 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5454 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5455 char buf[96];
5456
5457 /* clear down all observed info as quickly as possible after read */
5458 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5459
5460 dd_dev_info(dd,
5461 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5462 info, egress_err_info_string(buf, sizeof(buf), info), src);
5463
5464 /* Eventually add other counters for each bit */
5465
5466 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5467 if (ppd->port_xmit_discards < ~(u64)0)
5468 ppd->port_xmit_discards++;
5469 }
5470}
5471
5472/*
5473 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5474 * register. Does it represent a 'port inactive' error?
5475 */
5476static inline int port_inactive_err(u64 posn)
5477{
5478 return (posn >= SEES(TX_LINKDOWN) &&
5479 posn <= SEES(TX_INCORRECT_LINK_STATE));
5480}
5481
5482/*
5483 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5484 * register. Does it represent a 'disallowed packet' error?
5485 */
5486static inline int disallowed_pkt_err(u64 posn)
5487{
5488 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5489 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5490}
5491
5492static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5493{
5494 u64 reg_copy = reg, handled = 0;
5495 char buf[96];
 5496 int i = 0;
 5497
5498 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5499 start_freeze_handling(dd->pport, 0);
 5500 if (is_ax(dd) && (reg &
 5501 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5502 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5503 start_freeze_handling(dd->pport, 0);
5504
5505 while (reg_copy) {
5506 int posn = fls64(reg_copy);
5507 /*
5508 * fls64() returns a 1-based offset, but we generally
5509 * want 0-based offsets.
5510 */
5511 int shift = posn - 1;
5512
5513 if (port_inactive_err(shift)) {
5514 count_port_inactive(dd);
5515 handled |= (1ULL << shift);
5516 } else if (disallowed_pkt_err(shift)) {
5517 handle_send_egress_err_info(dd);
5518 handled |= (1ULL << shift);
5519 }
5520 clear_bit(shift, (unsigned long *)&reg_copy);
5521 }
5522
5523 reg &= ~handled;
5524
5525 if (reg)
5526 dd_dev_info(dd, "Egress Error: %s\n",
5527 egress_err_status_string(buf, sizeof(buf), reg));
 5528
5529 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5530 if (reg & (1ull << i))
5531 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5532 }
 5533}
5534
5535static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5536{
5537 char buf[96];
 5538 int i = 0;
 5539
5540 dd_dev_info(dd, "Send Error: %s\n",
5541 send_err_status_string(buf, sizeof(buf), reg));
5542
 5543 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5544 if (reg & (1ull << i))
5545 incr_cntr64(&dd->send_err_status_cnt[i]);
5546 }
 5547}
5548
5549/*
5550 * The maximum number of times the error clear down will loop before
5551 * blocking a repeating error. This value is arbitrary.
5552 */
5553#define MAX_CLEAR_COUNT 20
5554
5555/*
5556 * Clear and handle an error register. All error interrupts are funneled
5557 * through here to have a central location to correctly handle single-
5558 * or multi-shot errors.
5559 *
5560 * For non per-context registers, call this routine with a context value
5561 * of 0 so the per-context offset is zero.
5562 *
5563 * If the handler loops too many times, assume that something is wrong
5564 * and can't be fixed, so mask the error bits.
5565 */
5566static void interrupt_clear_down(struct hfi1_devdata *dd,
5567 u32 context,
5568 const struct err_reg_info *eri)
5569{
5570 u64 reg;
5571 u32 count;
5572
5573 /* read in a loop until no more errors are seen */
5574 count = 0;
5575 while (1) {
5576 reg = read_kctxt_csr(dd, context, eri->status);
5577 if (reg == 0)
5578 break;
5579 write_kctxt_csr(dd, context, eri->clear, reg);
5580 if (likely(eri->handler))
5581 eri->handler(dd, context, reg);
5582 count++;
5583 if (count > MAX_CLEAR_COUNT) {
5584 u64 mask;
5585
5586 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5587 eri->desc, reg);
5588 /*
5589 * Read-modify-write so any other masked bits
5590 * remain masked.
5591 */
5592 mask = read_kctxt_csr(dd, context, eri->mask);
5593 mask &= ~reg;
5594 write_kctxt_csr(dd, context, eri->mask, mask);
5595 break;
5596 }
5597 }
5598}
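/*
 * Net effect (a sketch of the policy, not an additional code path): a
 * status bit that re-asserts more than MAX_CLEAR_COUNT times in one pass
 * is removed from the corresponding mask register, silencing that source
 * until the mask is explicitly rewritten elsewhere.
 */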
5599
5600/*
5601 * CCE block "misc" interrupt. Source is < 16.
5602 */
5603static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5604{
5605 const struct err_reg_info *eri = &misc_errs[source];
5606
5607 if (eri->handler) {
5608 interrupt_clear_down(dd, 0, eri);
5609 } else {
5610 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5611 source);
5612 }
5613}
5614
5615static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5616{
5617 return flag_string(buf, buf_len, flags,
5618 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5619}
5620
5621/*
5622 * Send context error interrupt. Source (hw_context) is < 160.
5623 *
5624 * All send context errors cause the send context to halt. The normal
5625 * clear-down mechanism cannot be used because we cannot clear the
5626 * error bits until several other long-running items are done first.
5627 * This is OK because with the context halted, nothing else is going
5628 * to happen on it anyway.
5629 */
5630static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5631 unsigned int hw_context)
5632{
5633 struct send_context_info *sci;
5634 struct send_context *sc;
5635 char flags[96];
5636 u64 status;
5637 u32 sw_index;
 5638 int i = 0;
 5639
5640 sw_index = dd->hw_to_sw[hw_context];
5641 if (sw_index >= dd->num_send_contexts) {
5642 dd_dev_err(dd,
5643 "out of range sw index %u for send context %u\n",
5644 sw_index, hw_context);
5645 return;
5646 }
5647 sci = &dd->send_contexts[sw_index];
5648 sc = sci->sc;
5649 if (!sc) {
5650 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5651 sw_index, hw_context);
5652 return;
5653 }
5654
5655 /* tell the software that a halt has begun */
5656 sc_stop(sc, SCF_HALTED);
5657
5658 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5659
5660 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5661 send_context_err_status_string(flags, sizeof(flags), status));
5662
5663 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5664 handle_send_egress_err_info(dd);
5665
5666 /*
5667 * Automatically restart halted kernel contexts out of interrupt
5668 * context. User contexts must ask the driver to restart the context.
5669 */
5670 if (sc->type != SC_USER)
5671 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
 5672
5673 /*
5674 * Update the counters for the corresponding status bits.
5675 * Note that these particular counters are aggregated over all
5676 * 160 contexts.
5677 */
5678 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5679 if (status & (1ull << i))
5680 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5681 }
 5682}
5683
5684static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5685 unsigned int source, u64 status)
5686{
5687 struct sdma_engine *sde;
 5688 int i = 0;
 5689
5690 sde = &dd->per_sdma[source];
5691#ifdef CONFIG_SDMA_VERBOSITY
5692 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5693 slashstrip(__FILE__), __LINE__, __func__);
5694 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5695 sde->this_idx, source, (unsigned long long)status);
5696#endif
5697 sdma_engine_error(sde, status);
 5698
5699 /*
5700 * Update the counters for the corresponding status bits.
5701 * Note that these particular counters are aggregated over
5702 * all 16 DMA engines.
5703 */
5704 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5705 if (status & (1ull << i))
5706 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5707 }
 5708}
5709
5710/*
5711 * CCE block SDMA error interrupt. Source is < 16.
5712 */
5713static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5714{
5715#ifdef CONFIG_SDMA_VERBOSITY
5716 struct sdma_engine *sde = &dd->per_sdma[source];
5717
5718 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5719 slashstrip(__FILE__), __LINE__, __func__);
5720 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5721 source);
5722 sdma_dumpstate(sde);
5723#endif
5724 interrupt_clear_down(dd, source, &sdma_eng_err);
5725}
5726
5727/*
5728 * CCE block "various" interrupt. Source is < 8.
5729 */
5730static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5731{
5732 const struct err_reg_info *eri = &various_err[source];
5733
5734 /*
5735 * TCritInt cannot go through interrupt_clear_down()
5736 * because it is not a second tier interrupt. The handler
5737 * should be called directly.
5738 */
5739 if (source == TCRIT_INT_SOURCE)
5740 handle_temp_err(dd);
5741 else if (eri->handler)
5742 interrupt_clear_down(dd, 0, eri);
5743 else
5744 dd_dev_info(dd,
5745 "%s: Unimplemented/reserved interrupt %d\n",
5746 __func__, source);
5747}
5748
5749static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5750{
5751 /* source is always zero */
5752 struct hfi1_pportdata *ppd = dd->pport;
5753 unsigned long flags;
5754 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5755
5756 if (reg & QSFP_HFI0_MODPRST_N) {
5757
5758 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5759 __func__);
5760
5761 if (!qsfp_mod_present(ppd)) {
5762 ppd->driver_link_ready = 0;
5763 /*
5764 * Cable removed, reset all our information about the
5765 * cache and cable capabilities
5766 */
5767
5768 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5769 /*
5770 * We don't set cache_refresh_required here as we expect
5771 * an interrupt when a cable is inserted
5772 */
5773 ppd->qsfp_info.cache_valid = 0;
5774 ppd->qsfp_info.qsfp_interrupt_functional = 0;
5775 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5776 flags);
5777 write_csr(dd,
5778 dd->hfi1_id ?
5779 ASIC_QSFP2_INVERT :
5780 ASIC_QSFP1_INVERT,
5781 qsfp_int_mgmt);
5782 if (ppd->host_link_state == HLS_DN_POLL) {
5783 /*
5784 * The link is still in POLL. This means
5785 * that the normal link down processing
5786 * will not happen. We have to do it here
5787 * before turning the DC off.
5788 */
5789 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5790 }
5791 } else {
5792 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5793 ppd->qsfp_info.cache_valid = 0;
5794 ppd->qsfp_info.cache_refresh_required = 1;
5795 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5796 flags);
5797
5798 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5799 write_csr(dd,
5800 dd->hfi1_id ?
5801 ASIC_QSFP2_INVERT :
5802 ASIC_QSFP1_INVERT,
5803 qsfp_int_mgmt);
5804 }
5805 }
5806
5807 if (reg & QSFP_HFI0_INT_N) {
5808
5809 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5810 __func__);
5811 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5812 ppd->qsfp_info.check_interrupt_flags = 1;
5813 ppd->qsfp_info.qsfp_interrupt_functional = 1;
5814 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5815 }
5816
5817 /* Schedule the QSFP work only if there is a cable attached. */
5818 if (qsfp_mod_present(ppd))
5819 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5820}
5821
5822static int request_host_lcb_access(struct hfi1_devdata *dd)
5823{
5824 int ret;
5825
5826 ret = do_8051_command(dd, HCMD_MISC,
5827 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5828 NULL);
5829 if (ret != HCMD_SUCCESS) {
5830 dd_dev_err(dd, "%s: command failed with error %d\n",
5831 __func__, ret);
5832 }
5833 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5834}
5835
5836static int request_8051_lcb_access(struct hfi1_devdata *dd)
5837{
5838 int ret;
5839
5840 ret = do_8051_command(dd, HCMD_MISC,
5841 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5842 NULL);
5843 if (ret != HCMD_SUCCESS) {
5844 dd_dev_err(dd, "%s: command failed with error %d\n",
5845 __func__, ret);
5846 }
5847 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5848}
5849
5850/*
5851 * Set the LCB selector - allow host access. The DCC selector always
5852 * points to the host.
5853 */
5854static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5855{
5856 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5857 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5858 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5859}
5860
5861/*
5862 * Clear the LCB selector - allow 8051 access. The DCC selector always
5863 * points to the host.
5864 */
5865static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5866{
5867 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5868 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5869}
5870
5871/*
5872 * Acquire LCB access from the 8051. If the host already has access,
5873 * just increment a counter. Otherwise, inform the 8051 that the
5874 * host is taking access.
5875 *
5876 * Returns:
5877 * 0 on success
5878 * -EBUSY if the 8051 has control and cannot be disturbed
5879 * -errno if unable to acquire access from the 8051
5880 */
5881int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5882{
5883 struct hfi1_pportdata *ppd = dd->pport;
5884 int ret = 0;
5885
5886 /*
5887 * Use the host link state lock so the operation of this routine
5888 * { link state check, selector change, count increment } can occur
5889 * as a unit against a link state change. Otherwise there is a
5890 * race between the state change and the count increment.
5891 */
5892 if (sleep_ok) {
5893 mutex_lock(&ppd->hls_lock);
5894 } else {
 5895 while (!mutex_trylock(&ppd->hls_lock))
 5896 udelay(1);
5897 }
5898
5899 /* this access is valid only when the link is up */
5900 if ((ppd->host_link_state & HLS_UP) == 0) {
5901 dd_dev_info(dd, "%s: link state %s not up\n",
5902 __func__, link_state_name(ppd->host_link_state));
5903 ret = -EBUSY;
5904 goto done;
5905 }
5906
5907 if (dd->lcb_access_count == 0) {
5908 ret = request_host_lcb_access(dd);
5909 if (ret) {
5910 dd_dev_err(dd,
5911 "%s: unable to acquire LCB access, err %d\n",
5912 __func__, ret);
5913 goto done;
5914 }
5915 set_host_lcb_access(dd);
5916 }
5917 dd->lcb_access_count++;
5918done:
5919 mutex_unlock(&ppd->hls_lock);
5920 return ret;
5921}
5922
5923/*
5924 * Release LCB access by decrementing the use count. If the count is moving
5925 * from 1 to 0, inform 8051 that it has control back.
5926 *
5927 * Returns:
5928 * 0 on success
5929 * -errno if unable to release access to the 8051
5930 */
5931int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5932{
5933 int ret = 0;
5934
5935 /*
5936 * Use the host link state lock because the acquire needed it.
5937 * Here, we only need to keep { selector change, count decrement }
5938 * as a unit.
5939 */
5940 if (sleep_ok) {
5941 mutex_lock(&dd->pport->hls_lock);
5942 } else {
 5943 while (!mutex_trylock(&dd->pport->hls_lock))
 5944 udelay(1);
5945 }
5946
5947 if (dd->lcb_access_count == 0) {
5948 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
5949 __func__);
5950 goto done;
5951 }
5952
5953 if (dd->lcb_access_count == 1) {
5954 set_8051_lcb_access(dd);
5955 ret = request_8051_lcb_access(dd);
5956 if (ret) {
5957 dd_dev_err(dd,
5958 "%s: unable to release LCB access, err %d\n",
5959 __func__, ret);
5960 /* restore host access if the grant didn't work */
5961 set_host_lcb_access(dd);
5962 goto done;
5963 }
5964 }
5965 dd->lcb_access_count--;
5966done:
5967 mutex_unlock(&dd->pport->hls_lock);
5968 return ret;
5969}
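/*
 * Illustrative usage (a sketch, not a specific in-tree caller): code that
 * must touch LCB CSRs directly brackets the access with the pair
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read/write DC_LCB_* CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Nested users simply bump lcb_access_count without re-arbitrating.
 */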
5970
5971/*
5972 * Initialize LCB access variables and state. Called during driver load,
5973 * after most of the initialization is finished.
5974 *
5975 * The DC default is LCB access on for the host. The driver defaults to
5976 * leaving access to the 8051. Assign access now - this constrains the call
5977 * to this routine to be after all LCB set-up is done. In particular, after
 5978 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
5979 */
5980static void init_lcb_access(struct hfi1_devdata *dd)
5981{
5982 dd->lcb_access_count = 0;
5983}
5984
5985/*
5986 * Write a response back to a 8051 request.
5987 */
5988static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
5989{
5990 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
5991 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
5992 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
5993 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
5994}
5995
5996/*
5997 * Handle requests from the 8051.
5998 */
5999static void handle_8051_request(struct hfi1_devdata *dd)
6000{
6001 u64 reg;
6002 u16 data;
6003 u8 type;
6004
6005 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6006 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6007 return; /* no request */
6008
6009 /* zero out COMPLETED so the response is seen */
6010 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6011
6012 /* extract request details */
6013 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6014 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6015 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6016 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6017
6018 switch (type) {
6019 case HREQ_LOAD_CONFIG:
6020 case HREQ_SAVE_CONFIG:
6021 case HREQ_READ_CONFIG:
6022 case HREQ_SET_TX_EQ_ABS:
6023 case HREQ_SET_TX_EQ_REL:
6024 case HREQ_ENABLE:
6025 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6026 type);
6027 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6028 break;
6029
6030 case HREQ_CONFIG_DONE:
6031 hreq_response(dd, HREQ_SUCCESS, 0);
6032 break;
6033
6034 case HREQ_INTERFACE_TEST:
6035 hreq_response(dd, HREQ_SUCCESS, data);
6036 break;
6037
6038 default:
6039 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6040 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6041 break;
6042 }
6043}
6044
6045static void write_global_credit(struct hfi1_devdata *dd,
6046 u8 vau, u16 total, u16 shared)
6047{
6048 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6049 ((u64)total
6050 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6051 | ((u64)shared
6052 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6053 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6054}
6055
6056/*
6057 * Set up initial VL15 credits of the remote. Assumes the rest of
 6058 * the CM credit registers are zero from a previous global or credit reset.
6059 */
6060void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6061{
6062 /* leave shared count at zero for both global and VL15 */
6063 write_global_credit(dd, vau, vl15buf, 0);
6064
6065 /* We may need some credits for another VL when sending packets
6066 * with the snoop interface. Dividing it down the middle for VL15
6067 * and VL0 should suffice.
6068 */
6069 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6070 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6071 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6072 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6073 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6074 } else {
6075 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6076 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6077 }
6078}
6079
6080/*
6081 * Zero all credit details from the previous connection and
6082 * reset the CM manager's internal counters.
6083 */
6084void reset_link_credits(struct hfi1_devdata *dd)
6085{
6086 int i;
6087
6088 /* remove all previous VL credit limits */
6089 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6090 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6091 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6092 write_global_credit(dd, 0, 0, 0);
6093 /* reset the CM block */
6094 pio_send_control(dd, PSC_CM_RESET);
6095}
6096
6097/* convert a vCU to a CU */
6098static u32 vcu_to_cu(u8 vcu)
6099{
6100 return 1 << vcu;
6101}
6102
6103/* convert a CU to a vCU */
6104static u8 cu_to_vcu(u32 cu)
6105{
6106 return ilog2(cu);
6107}
6108
6109/* convert a vAU to an AU */
6110static u32 vau_to_au(u8 vau)
6111{
6112 return 8 * (1 << vau);
6113}
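/*
 * Worked examples of the conversions above: vau_to_au(3) = 8 * 2^3 = 64
 * bytes, vcu_to_cu(2) = 4, and cu_to_vcu(4) = ilog2(4) = 2.
 */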
6114
6115static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6116{
6117 ppd->sm_trap_qp = 0x0;
6118 ppd->sa_qp = 0x1;
6119}
6120
6121/*
6122 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6123 */
6124static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6125{
6126 u64 reg;
6127
6128 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6129 write_csr(dd, DC_LCB_CFG_RUN, 0);
6130 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6131 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6132 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6133 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6134 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6135 reg = read_csr(dd, DCC_CFG_RESET);
6136 write_csr(dd, DCC_CFG_RESET,
6137 reg
6138 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6139 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6140 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6141 if (!abort) {
6142 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6143 write_csr(dd, DCC_CFG_RESET, reg);
6144 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6145 }
6146}
6147
6148/*
6149 * This routine should be called after the link has been transitioned to
6150 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6151 * reset).
6152 *
6153 * The expectation is that the caller of this routine would have taken
6154 * care of properly transitioning the link into the correct state.
6155 */
6156static void dc_shutdown(struct hfi1_devdata *dd)
6157{
6158 unsigned long flags;
6159
6160 spin_lock_irqsave(&dd->dc8051_lock, flags);
6161 if (dd->dc_shutdown) {
6162 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6163 return;
6164 }
6165 dd->dc_shutdown = 1;
6166 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6167 /* Shutdown the LCB */
6168 lcb_shutdown(dd, 1);
 6169 /* Going to OFFLINE would have caused the 8051 to put the
 6170 * SerDes into reset already. Just need to shut down the 8051
 6171 * itself. */
6172 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6173}
6174
6175/* Calling this after the DC has been brought out of reset should not
6176 * do any damage. */
6177static void dc_start(struct hfi1_devdata *dd)
6178{
6179 unsigned long flags;
6180 int ret;
6181
6182 spin_lock_irqsave(&dd->dc8051_lock, flags);
6183 if (!dd->dc_shutdown)
6184 goto done;
6185 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6186 /* Take the 8051 out of reset */
6187 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6188 /* Wait until 8051 is ready */
6189 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6190 if (ret) {
6191 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6192 __func__);
6193 }
6194 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6195 write_csr(dd, DCC_CFG_RESET, 0x10);
6196 /* lcb_shutdown() with abort=1 does not restore these */
6197 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6198 spin_lock_irqsave(&dd->dc8051_lock, flags);
6199 dd->dc_shutdown = 0;
6200done:
6201 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6202}
6203
6204/*
6205 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6206 */
6207static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6208{
6209 u64 rx_radr, tx_radr;
6210 u32 version;
6211
6212 if (dd->icode != ICODE_FPGA_EMULATION)
6213 return;
6214
6215 /*
6216 * These LCB defaults on emulator _s are good, nothing to do here:
6217 * LCB_CFG_TX_FIFOS_RADR
6218 * LCB_CFG_RX_FIFOS_RADR
6219 * LCB_CFG_LN_DCLK
6220 * LCB_CFG_IGNORE_LOST_RCLK
6221 */
6222 if (is_emulator_s(dd))
6223 return;
6224 /* else this is _p */
6225
6226 version = emulator_rev(dd);
 6227 if (!is_ax(dd))
 6228 version = 0x2d; /* all B0 use 0x2d or higher settings */
6229
6230 if (version <= 0x12) {
6231 /* release 0x12 and below */
6232
6233 /*
6234 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6235 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6236 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6237 */
6238 rx_radr =
6239 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6240 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6241 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6242 /*
6243 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6244 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6245 */
6246 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6247 } else if (version <= 0x18) {
6248 /* release 0x13 up to 0x18 */
6249 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6250 rx_radr =
6251 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6252 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6253 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6254 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6255 } else if (version == 0x19) {
6256 /* release 0x19 */
6257 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6258 rx_radr =
6259 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6260 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6261 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6262 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6263 } else if (version == 0x1a) {
6264 /* release 0x1a */
6265 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6266 rx_radr =
6267 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6268 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6269 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6270 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6271 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6272 } else {
6273 /* release 0x1b and higher */
6274 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6275 rx_radr =
6276 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6277 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6278 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6279 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6280 }
6281
6282 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6283 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6284 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6285 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6286 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6287}
6288
6289/*
6290 * Handle a SMA idle message
6291 *
6292 * This is a work-queue function outside of the interrupt.
6293 */
6294void handle_sma_message(struct work_struct *work)
6295{
6296 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6297 sma_message_work);
6298 struct hfi1_devdata *dd = ppd->dd;
6299 u64 msg;
6300 int ret;
6301
6302	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6303	 * is stripped off */
6304 ret = read_idle_sma(dd, &msg);
6305 if (ret)
6306 return;
6307 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6308 /*
6309 * React to the SMA message. Byte[1] (0 for us) is the command.
6310 */
6311 switch (msg & 0xff) {
6312 case SMA_IDLE_ARM:
6313 /*
6314 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6315 * State Transitions
6316 *
6317 * Only expected in INIT or ARMED, discard otherwise.
6318 */
6319 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6320 ppd->neighbor_normal = 1;
6321 break;
6322 case SMA_IDLE_ACTIVE:
6323 /*
6324 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6325 * State Transitions
6326 *
6327 * Can activate the node. Discard otherwise.
6328 */
6329 if (ppd->host_link_state == HLS_UP_ARMED
6330 && ppd->is_active_optimize_enabled) {
6331 ppd->neighbor_normal = 1;
6332 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6333 if (ret)
6334 dd_dev_err(
6335 dd,
6336 "%s: received Active SMA idle message, couldn't set link to Active\n",
6337 __func__);
6338 }
6339 break;
6340 default:
6341 dd_dev_err(dd,
6342 "%s: received unexpected SMA idle message 0x%llx\n",
6343 __func__, msg);
6344 break;
6345 }
6346}
6347
6348static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6349{
6350 u64 rcvctrl;
6351 unsigned long flags;
6352
6353 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6354 rcvctrl = read_csr(dd, RCV_CTRL);
6355 rcvctrl |= add;
6356 rcvctrl &= ~clear;
6357 write_csr(dd, RCV_CTRL, rcvctrl);
6358 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6359}
6360
6361static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6362{
6363 adjust_rcvctrl(dd, add, 0);
6364}
6365
6366static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6367{
6368 adjust_rcvctrl(dd, 0, clear);
6369}
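/*
 * Usage note (added commentary): callers adjust RCV_CTRL one mask at a time
 * through these helpers, e.g. rxe_freeze() clears and rxe_kernel_unfreeze()
 * re-adds RCV_CTRL_RCV_PORT_ENABLE_SMASK below. The rcvctrl_lock keeps the
 * read-modify-write of the shared CSR atomic with respect to other callers.
 */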
6370
6371/*
6372 * Called from all interrupt handlers to start handling an SPC freeze.
6373 */
6374void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6375{
6376 struct hfi1_devdata *dd = ppd->dd;
6377 struct send_context *sc;
6378 int i;
6379
6380 if (flags & FREEZE_SELF)
6381 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6382
6383 /* enter frozen mode */
6384 dd->flags |= HFI1_FROZEN;
6385
6386 /* notify all SDMA engines that they are going into a freeze */
6387 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6388
6389 /* do halt pre-handling on all enabled send contexts */
6390 for (i = 0; i < dd->num_send_contexts; i++) {
6391 sc = dd->send_contexts[i].sc;
6392 if (sc && (sc->flags & SCF_ENABLED))
6393 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6394 }
6395
6396	/* Send contexts are frozen. Notify user space */
6397 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6398
6399 if (flags & FREEZE_ABORT) {
6400 dd_dev_err(dd,
6401 "Aborted freeze recovery. Please REBOOT system\n");
6402 return;
6403 }
6404 /* queue non-interrupt handler */
6405 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6406}
6407
6408/*
6409 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6410 * depending on the "freeze" parameter.
6411 *
6412 * No need to return an error if it times out, our only option
6413 * is to proceed anyway.
6414 */
6415static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6416{
6417 unsigned long timeout;
6418 u64 reg;
6419
6420 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6421 while (1) {
6422 reg = read_csr(dd, CCE_STATUS);
6423 if (freeze) {
6424 /* waiting until all indicators are set */
6425 if ((reg & ALL_FROZE) == ALL_FROZE)
6426 return; /* all done */
6427 } else {
6428 /* waiting until all indicators are clear */
6429 if ((reg & ALL_FROZE) == 0)
6430 return; /* all done */
6431 }
6432
6433 if (time_after(jiffies, timeout)) {
6434 dd_dev_err(dd,
6435 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6436 freeze ? "" : "un",
6437 reg & ALL_FROZE,
6438 freeze ? ALL_FROZE : 0ull);
6439 return;
6440 }
6441 usleep_range(80, 120);
6442 }
6443}
6444
6445/*
6446 * Do all freeze handling for the RXE block.
6447 */
6448static void rxe_freeze(struct hfi1_devdata *dd)
6449{
6450 int i;
6451
6452 /* disable port */
6453 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6454
6455 /* disable all receive contexts */
6456 for (i = 0; i < dd->num_rcv_contexts; i++)
6457 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6458}
6459
6460/*
6461 * Unfreeze handling for the RXE block - kernel contexts only.
6462 * This will also enable the port. User contexts will do unfreeze
6463 * handling on a per-context basis as they call into the driver.
6464 *
6465 */
6466static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6467{
6468 int i;
6469
6470 /* enable all kernel contexts */
6471 for (i = 0; i < dd->n_krcv_queues; i++)
6472 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6473
6474 /* enable port */
6475 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6476}
6477
6478/*
6479 * Non-interrupt SPC freeze handling.
6480 *
6481 * This is a work-queue function outside of the triggering interrupt.
6482 */
6483void handle_freeze(struct work_struct *work)
6484{
6485 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6486 freeze_work);
6487 struct hfi1_devdata *dd = ppd->dd;
6488
6489 /* wait for freeze indicators on all affected blocks */
6490 dd_dev_info(dd, "Entering SPC freeze\n");
6491 wait_for_freeze_status(dd, 1);
6492
6493 /* SPC is now frozen */
6494
6495 /* do send PIO freeze steps */
6496 pio_freeze(dd);
6497
6498 /* do send DMA freeze steps */
6499 sdma_freeze(dd);
6500
6501 /* do send egress freeze steps - nothing to do */
6502
6503 /* do receive freeze steps */
6504 rxe_freeze(dd);
6505
6506 /*
6507 * Unfreeze the hardware - clear the freeze, wait for each
6508 * block's frozen bit to clear, then clear the frozen flag.
6509 */
6510 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6511 wait_for_freeze_status(dd, 0);
6512
6513	if (is_ax(dd)) {
6514		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6515 wait_for_freeze_status(dd, 1);
6516 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6517 wait_for_freeze_status(dd, 0);
6518 }
6519
6520 /* do send PIO unfreeze steps for kernel contexts */
6521 pio_kernel_unfreeze(dd);
6522
6523 /* do send DMA unfreeze steps */
6524 sdma_unfreeze(dd);
6525
6526 /* do send egress unfreeze steps - nothing to do */
6527
6528 /* do receive unfreeze steps for kernel contexts */
6529 rxe_kernel_unfreeze(dd);
6530
6531 /*
6532 * The unfreeze procedure touches global device registers when
6533 * it disables and re-enables RXE. Mark the device unfrozen
6534 * after all that is done so other parts of the driver waiting
6535 * for the device to unfreeze don't do things out of order.
6536 *
6537	 * The above implies that the meaning of the HFI1_FROZEN flag is
6538 * "Device has gone into freeze mode and freeze mode handling
6539 * is still in progress."
6540 *
6541 * The flag will be removed when freeze mode processing has
6542 * completed.
6543 */
6544 dd->flags &= ~HFI1_FROZEN;
6545 wake_up(&dd->event_queue);
6546
6547 /* no longer frozen */
6548 dd_dev_err(dd, "Exiting SPC freeze\n");
6549}
6550
6551/*
6552 * Handle a link up interrupt from the 8051.
6553 *
6554 * This is a work-queue function outside of the interrupt.
6555 */
6556void handle_link_up(struct work_struct *work)
6557{
6558 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6559 link_up_work);
6560 set_link_state(ppd, HLS_UP_INIT);
6561
6562 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6563 read_ltp_rtt(ppd->dd);
6564 /*
6565 * OPA specifies that certain counters are cleared on a transition
6566 * to link up, so do that.
6567 */
6568 clear_linkup_counters(ppd->dd);
6569 /*
6570 * And (re)set link up default values.
6571 */
6572 set_linkup_defaults(ppd);
6573
6574 /* enforce link speed enabled */
6575 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6576 /* oops - current speed is not enabled, bounce */
6577 dd_dev_err(ppd->dd,
6578 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6579 ppd->link_speed_active, ppd->link_speed_enabled);
6580 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6581 OPA_LINKDOWN_REASON_SPEED_POLICY);
6582 set_link_state(ppd, HLS_DN_OFFLINE);
6583 start_link(ppd);
6584 }
6585}
6586
6587/* Several pieces of LNI information were cached for SMA in ppd.
6588 * Reset these on link down */
6589static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6590{
6591 ppd->neighbor_guid = 0;
6592 ppd->neighbor_port_number = 0;
6593 ppd->neighbor_type = 0;
6594 ppd->neighbor_fm_security = 0;
6595}
6596
6597/*
6598 * Handle a link down interrupt from the 8051.
6599 *
6600 * This is a work-queue function outside of the interrupt.
6601 */
6602void handle_link_down(struct work_struct *work)
6603{
6604 u8 lcl_reason, neigh_reason = 0;
6605 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6606 link_down_work);
6607
6608 /* go offline first, then deal with reasons */
6609 set_link_state(ppd, HLS_DN_OFFLINE);
6610
6611 lcl_reason = 0;
6612 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6613
6614 /*
6615 * If no reason, assume peer-initiated but missed
6616 * LinkGoingDown idle flits.
6617 */
6618 if (neigh_reason == 0)
6619 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6620
6621 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6622
6623 reset_neighbor_info(ppd);
6624
6625 /* disable the port */
6626 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6627
6628 /* If there is no cable attached, turn the DC off. Otherwise,
6629 * start the link bring up. */
6630 if (!qsfp_mod_present(ppd))
6631 dc_shutdown(ppd->dd);
6632 else
6633 start_link(ppd);
6634}
6635
6636void handle_link_bounce(struct work_struct *work)
6637{
6638 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6639 link_bounce_work);
6640
6641 /*
6642 * Only do something if the link is currently up.
6643 */
6644 if (ppd->host_link_state & HLS_UP) {
6645 set_link_state(ppd, HLS_DN_OFFLINE);
6646 start_link(ppd);
6647 } else {
6648 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6649 __func__, link_state_name(ppd->host_link_state));
6650 }
6651}
6652
6653/*
6654 * Mask conversion: Capability exchange to Port LTP. The capability
6655 * exchange has an implicit 16b CRC that is mandatory.
6656 */
6657static int cap_to_port_ltp(int cap)
6658{
6659 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6660
6661 if (cap & CAP_CRC_14B)
6662 port_ltp |= PORT_LTP_CRC_MODE_14;
6663 if (cap & CAP_CRC_48B)
6664 port_ltp |= PORT_LTP_CRC_MODE_48;
6665 if (cap & CAP_CRC_12B_16B_PER_LANE)
6666 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6667
6668 return port_ltp;
6669}
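/*
 * Worked example (illustrative): a capability exchange yielding
 * cap = CAP_CRC_14B | CAP_CRC_48B maps to PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48, i.e. the mandatory 16b
 * mode plus the two optional modes that were advertised.
 */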
6670
6671/*
6672 * Convert an OPA Port LTP mask to capability mask
6673 */
6674int port_ltp_to_cap(int port_ltp)
6675{
6676 int cap_mask = 0;
6677
6678 if (port_ltp & PORT_LTP_CRC_MODE_14)
6679 cap_mask |= CAP_CRC_14B;
6680 if (port_ltp & PORT_LTP_CRC_MODE_48)
6681 cap_mask |= CAP_CRC_48B;
6682 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6683 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6684
6685 return cap_mask;
6686}
6687
6688/*
6689 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6690 */
6691static int lcb_to_port_ltp(int lcb_crc)
6692{
6693 int port_ltp = 0;
6694
6695 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6696 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6697 else if (lcb_crc == LCB_CRC_48B)
6698 port_ltp = PORT_LTP_CRC_MODE_48;
6699 else if (lcb_crc == LCB_CRC_14B)
6700 port_ltp = PORT_LTP_CRC_MODE_14;
6701 else
6702 port_ltp = PORT_LTP_CRC_MODE_16;
6703
6704 return port_ltp;
6705}
6706
6707/*
6708 * Our neighbor has indicated that we are allowed to act as a fabric
6709 * manager, so place the full management partition key in the second
6710 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6711 * that we should already have the limited management partition key in
6712 * array element 1, and also that the port is not yet up when
6713 * add_full_mgmt_pkey() is invoked.
6714 */
6715static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6716{
6717 struct hfi1_devdata *dd = ppd->dd;
6718
6719	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6720 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6721 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6722 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6723	ppd->pkeys[2] = FULL_MGMT_P_KEY;
6724 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6725}
6726
6727/*
6728 * Convert the given link width to the OPA link width bitmask.
6729 */
6730static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6731{
6732 switch (width) {
6733 case 0:
6734 /*
6735 * Simulator and quick linkup do not set the width.
6736 * Just set it to 4x without complaint.
6737 */
6738 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6739 return OPA_LINK_WIDTH_4X;
6740 return 0; /* no lanes up */
6741 case 1: return OPA_LINK_WIDTH_1X;
6742 case 2: return OPA_LINK_WIDTH_2X;
6743 case 3: return OPA_LINK_WIDTH_3X;
6744 default:
6745 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6746 __func__, width);
6747 /* fall through */
6748 case 4: return OPA_LINK_WIDTH_4X;
6749 }
6750}
6751
6752/*
6753 * Do a population count on the bottom nibble.
6754 */
6755static const u8 bit_counts[16] = {
6756 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6757};
6758static inline u8 nibble_to_count(u8 nibble)
6759{
6760 return bit_counts[nibble & 0xf];
6761}
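/*
 * Example (illustrative): enable_lane_tx = 0xb (lanes 0, 1 and 3) gives
 * nibble_to_count(0xb) = 3 active lanes; only the low nibble is examined,
 * so 0x1b also yields 3.
 */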
6762
6763/*
6764 * Read the active lane information from the 8051 registers and return
6765 * their widths.
6766 *
6767 * Active lane information is found in these 8051 registers:
6768 * enable_lane_tx
6769 * enable_lane_rx
6770 */
6771static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6772 u16 *rx_width)
6773{
6774 u16 tx, rx;
6775 u8 enable_lane_rx;
6776 u8 enable_lane_tx;
6777 u8 tx_polarity_inversion;
6778 u8 rx_polarity_inversion;
6779 u8 max_rate;
6780
6781 /* read the active lanes */
6782 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6783 &rx_polarity_inversion, &max_rate);
6784 read_local_lni(dd, &enable_lane_rx);
6785
6786 /* convert to counts */
6787 tx = nibble_to_count(enable_lane_tx);
6788 rx = nibble_to_count(enable_lane_rx);
6789
6790 /*
6791 * Set link_speed_active here, overriding what was set in
6792 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6793 * set the max_rate field in handle_verify_cap until v0.19.
6794 */
6795 if ((dd->icode == ICODE_RTL_SILICON)
6796 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6797 /* max_rate: 0 = 12.5G, 1 = 25G */
6798 switch (max_rate) {
6799 case 0:
6800 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6801 break;
6802 default:
6803 dd_dev_err(dd,
6804 "%s: unexpected max rate %d, using 25Gb\n",
6805 __func__, (int)max_rate);
6806 /* fall through */
6807 case 1:
6808 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6809 break;
6810 }
6811 }
6812
6813 dd_dev_info(dd,
6814 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6815 enable_lane_tx, tx, enable_lane_rx, rx);
6816 *tx_width = link_width_to_bits(dd, tx);
6817 *rx_width = link_width_to_bits(dd, rx);
6818}
6819
6820/*
6821 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6822 * Valid after the end of VerifyCap and during LinkUp. Does not change
6823 * after link up. I.e. look elsewhere for downgrade information.
6824 *
6825 * Bits are:
6826 * + bits [7:4] contain the number of active transmitters
6827 * + bits [3:0] contain the number of active receivers
6828 * These are numbers 1 through 4 and can be different values if the
6829 * link is asymmetric.
6830 *
6831 * verify_cap_local_fm_link_width[0] retains its original value.
6832 */
6833static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6834 u16 *rx_width)
6835{
6836 u16 widths, tx, rx;
6837 u8 misc_bits, local_flags;
6838 u16 active_tx, active_rx;
6839
6840 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6841 tx = widths >> 12;
6842 rx = (widths >> 8) & 0xf;
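	/*
	 * Example (illustrative): widths = 0x4400 decodes to 4 active
	 * transmitters (bits 15:12) and 4 active receivers (bits 11:8);
	 * an asymmetric 0x4100 would give tx = 4, rx = 1.
	 */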
6843
6844 *tx_width = link_width_to_bits(dd, tx);
6845 *rx_width = link_width_to_bits(dd, rx);
6846
6847 /* print the active widths */
6848 get_link_widths(dd, &active_tx, &active_rx);
6849}
6850
6851/*
6852 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6853 * hardware information when the link first comes up.
6854 *
6855 * The link width is not available until after VerifyCap.AllFramesReceived
6856 * (the trigger for handle_verify_cap), so this is outside that routine
6857 * and should be called when the 8051 signals linkup.
6858 */
6859void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6860{
6861 u16 tx_width, rx_width;
6862
6863 /* get end-of-LNI link widths */
6864 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6865
6866 /* use tx_width as the link is supposed to be symmetric on link up */
6867 ppd->link_width_active = tx_width;
6868 /* link width downgrade active (LWD.A) starts out matching LW.A */
6869 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6870 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6871 /* per OPA spec, on link up LWD.E resets to LWD.S */
6872 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6873	/* cache the active egress rate (units of 10^6 bits/sec) */
6874 ppd->current_egress_rate = active_egress_rate(ppd);
6875}
6876
6877/*
6878 * Handle a verify capabilities interrupt from the 8051.
6879 *
6880 * This is a work-queue function outside of the interrupt.
6881 */
6882void handle_verify_cap(struct work_struct *work)
6883{
6884 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6885 link_vc_work);
6886 struct hfi1_devdata *dd = ppd->dd;
6887 u64 reg;
6888 u8 power_management;
6889	u8 continuous;
6890 u8 vcu;
6891 u8 vau;
6892 u8 z;
6893 u16 vl15buf;
6894 u16 link_widths;
6895 u16 crc_mask;
6896 u16 crc_val;
6897 u16 device_id;
6898 u16 active_tx, active_rx;
6899 u8 partner_supported_crc;
6900 u8 remote_tx_rate;
6901 u8 device_rev;
6902
6903 set_link_state(ppd, HLS_VERIFY_CAP);
6904
6905 lcb_shutdown(dd, 0);
6906 adjust_lcb_for_fpga_serdes(dd);
6907
6908 /*
6909 * These are now valid:
6910 * remote VerifyCap fields in the general LNI config
6911 * CSR DC8051_STS_REMOTE_GUID
6912 * CSR DC8051_STS_REMOTE_NODE_TYPE
6913 * CSR DC8051_STS_REMOTE_FM_SECURITY
6914 * CSR DC8051_STS_REMOTE_PORT_NO
6915 */
6916
6917	read_vc_remote_phy(dd, &power_management, &continuous);
6918 read_vc_remote_fabric(
6919 dd,
6920 &vau,
6921 &z,
6922 &vcu,
6923 &vl15buf,
6924 &partner_supported_crc);
6925 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
6926 read_remote_device_id(dd, &device_id, &device_rev);
6927 /*
6928 * And the 'MgmtAllowed' information, which is exchanged during
6929	 * LNI, is also available at this point.
6930 */
6931 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
6932 /* print the active widths */
6933 get_link_widths(dd, &active_tx, &active_rx);
6934 dd_dev_info(dd,
6935 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
6936		    (int)power_management, (int)continuous);
6937 dd_dev_info(dd,
6938 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
6939 (int)vau,
6940 (int)z,
6941 (int)vcu,
6942 (int)vl15buf,
6943 (int)partner_supported_crc);
6944 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
6945 (u32)remote_tx_rate, (u32)link_widths);
6946 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
6947 (u32)device_id, (u32)device_rev);
6948 /*
6949 * The peer vAU value just read is the peer receiver value. HFI does
6950 * not support a transmit vAU of 0 (AU == 8). We advertised that
6951 * with Z=1 in the fabric capabilities sent to the peer. The peer
6952 * will see our Z=1, and, if it advertised a vAU of 0, will move its
6953 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
6954 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
6955 * subject to the Z value exception.
6956 */
6957 if (vau == 0)
6958 vau = 1;
6959 set_up_vl15(dd, vau, vl15buf);
6960
6961 /* set up the LCB CRC mode */
6962 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
6963
6964 /* order is important: use the lowest bit in common */
6965 if (crc_mask & CAP_CRC_14B)
6966 crc_val = LCB_CRC_14B;
6967 else if (crc_mask & CAP_CRC_48B)
6968 crc_val = LCB_CRC_48B;
6969 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
6970 crc_val = LCB_CRC_12B_16B_PER_LANE;
6971 else
6972 crc_val = LCB_CRC_16B;
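	/*
	 * Example (illustrative): if 14B and 48B are enabled locally but the
	 * peer only advertised 48B, crc_mask = CAP_CRC_48B and the link runs
	 * with LCB_CRC_48B; with nothing optional in common, the chain falls
	 * through to the mandatory LCB_CRC_16B.
	 */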
6973
6974 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
6975 write_csr(dd, DC_LCB_CFG_CRC_MODE,
6976 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
6977
6978 /* set (14b only) or clear sideband credit */
6979 reg = read_csr(dd, SEND_CM_CTRL);
6980 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
6981 write_csr(dd, SEND_CM_CTRL,
6982 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6983 } else {
6984 write_csr(dd, SEND_CM_CTRL,
6985 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
6986 }
6987
6988 ppd->link_speed_active = 0; /* invalid value */
6989 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
6990 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
6991 switch (remote_tx_rate) {
6992 case 0:
6993 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
6994 break;
6995 case 1:
6996 ppd->link_speed_active = OPA_LINK_SPEED_25G;
6997 break;
6998 }
6999 } else {
7000 /* actual rate is highest bit of the ANDed rates */
7001 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7002
7003 if (rate & 2)
7004 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7005 else if (rate & 1)
7006 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7007 }
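	/*
	 * Example (illustrative): local_tx_rate = 0x3 (12.5G and 25G) ANDed
	 * with remote_tx_rate = 0x2 (25G only) gives rate = 0x2, so the
	 * 25G bit wins and link_speed_active becomes OPA_LINK_SPEED_25G.
	 */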
7008 if (ppd->link_speed_active == 0) {
7009 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7010 __func__, (int)remote_tx_rate);
7011 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7012 }
7013
7014 /*
7015 * Cache the values of the supported, enabled, and active
7016 * LTP CRC modes to return in 'portinfo' queries. But the bit
7017 * flags that are returned in the portinfo query differ from
7018 * what's in the link_crc_mask, crc_sizes, and crc_val
7019 * variables. Convert these here.
7020 */
7021 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7022 /* supported crc modes */
7023 ppd->port_ltp_crc_mode |=
7024 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7025 /* enabled crc modes */
7026 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7027 /* active crc mode */
7028
7029 /* set up the remote credit return table */
7030 assign_remote_cm_au_table(dd, vcu);
7031
7032 /*
7033 * The LCB is reset on entry to handle_verify_cap(), so this must
7034 * be applied on every link up.
7035 *
7036 * Adjust LCB error kill enable to kill the link if
7037 * these RBUF errors are seen:
7038 * REPLAY_BUF_MBE_SMASK
7039 * FLIT_INPUT_BUF_MBE_SMASK
7040 */
7041	if (is_ax(dd)) {	/* fixed in B0 */
7042		reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7043 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7044 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7045 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7046 }
7047
7048 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7049 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7050
7051 /* give 8051 access to the LCB CSRs */
7052 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7053 set_8051_lcb_access(dd);
7054
7055 ppd->neighbor_guid =
7056 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7057 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7058 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7059 ppd->neighbor_type =
7060 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7061 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7062 ppd->neighbor_fm_security =
7063 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7064 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7065 dd_dev_info(dd,
7066 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7067 ppd->neighbor_guid, ppd->neighbor_type,
7068 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7069 if (ppd->mgmt_allowed)
7070 add_full_mgmt_pkey(ppd);
7071
7072 /* tell the 8051 to go to LinkUp */
7073 set_link_state(ppd, HLS_GOING_UP);
7074}
7075
7076/*
7077 * Apply the link width downgrade enabled policy against the current active
7078 * link widths.
7079 *
7080 * Called when the enabled policy changes or the active link widths change.
7081 */
7082void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7083{
7084	int do_bounce = 0;
7085	int tries;
7086 u16 lwde;
7087	u16 tx, rx;
7088
7089	/* use the hls lock to avoid a race with actual link up */
7090 tries = 0;
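	/*
	 * Bounded wait (rough estimate): up to 1000 retries of ~100 us each,
	 * on the order of 100 ms total, before giving up on a link that is
	 * still in the GOING_UP state.
	 */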
7091retry:
7092	mutex_lock(&ppd->hls_lock);
7093 /* only apply if the link is up */
7094	if (!(ppd->host_link_state & HLS_UP)) {
7095 /* still going up..wait and retry */
7096 if (ppd->host_link_state & HLS_GOING_UP) {
7097 if (++tries < 1000) {
7098 mutex_unlock(&ppd->hls_lock);
7099 usleep_range(100, 120); /* arbitrary */
7100 goto retry;
7101 }
7102 dd_dev_err(ppd->dd,
7103 "%s: giving up waiting for link state change\n",
7104 __func__);
7105 }
7106 goto done;
7107 }
7108
7109 lwde = ppd->link_width_downgrade_enabled;
7110
7111 if (refresh_widths) {
7112 get_link_widths(ppd->dd, &tx, &rx);
7113 ppd->link_width_downgrade_tx_active = tx;
7114 ppd->link_width_downgrade_rx_active = rx;
7115 }
7116
7117 if (lwde == 0) {
7118 /* downgrade is disabled */
7119
7120 /* bounce if not at starting active width */
7121 if ((ppd->link_width_active !=
7122 ppd->link_width_downgrade_tx_active)
7123 || (ppd->link_width_active !=
7124 ppd->link_width_downgrade_rx_active)) {
7125 dd_dev_err(ppd->dd,
7126 "Link downgrade is disabled and link has downgraded, downing link\n");
7127 dd_dev_err(ppd->dd,
7128 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7129 ppd->link_width_active,
7130 ppd->link_width_downgrade_tx_active,
7131 ppd->link_width_downgrade_rx_active);
7132 do_bounce = 1;
7133 }
7134 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7135 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7136 /* Tx or Rx is outside the enabled policy */
7137 dd_dev_err(ppd->dd,
7138 "Link is outside of downgrade allowed, downing link\n");
7139 dd_dev_err(ppd->dd,
7140 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7141 lwde,
7142 ppd->link_width_downgrade_tx_active,
7143 ppd->link_width_downgrade_rx_active);
7144 do_bounce = 1;
7145 }
7146
7147done:
7148 mutex_unlock(&ppd->hls_lock);
7149
7150	if (do_bounce) {
7151 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7152 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7153 set_link_state(ppd, HLS_DN_OFFLINE);
7154 start_link(ppd);
7155 }
7156}
7157
7158/*
7159 * Handle a link downgrade interrupt from the 8051.
7160 *
7161 * This is a work-queue function outside of the interrupt.
7162 */
7163void handle_link_downgrade(struct work_struct *work)
7164{
7165 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7166 link_downgrade_work);
7167
7168 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7169 apply_link_downgrade_policy(ppd, 1);
7170}
7171
7172static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7173{
7174 return flag_string(buf, buf_len, flags, dcc_err_flags,
7175 ARRAY_SIZE(dcc_err_flags));
7176}
7177
7178static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7179{
7180 return flag_string(buf, buf_len, flags, lcb_err_flags,
7181 ARRAY_SIZE(lcb_err_flags));
7182}
7183
7184static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7185{
7186 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7187 ARRAY_SIZE(dc8051_err_flags));
7188}
7189
7190static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7191{
7192 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7193 ARRAY_SIZE(dc8051_info_err_flags));
7194}
7195
7196static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7197{
7198 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7199 ARRAY_SIZE(dc8051_info_host_msg_flags));
7200}
7201
7202static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7203{
7204 struct hfi1_pportdata *ppd = dd->pport;
7205 u64 info, err, host_msg;
7206 int queue_link_down = 0;
7207 char buf[96];
7208
7209 /* look at the flags */
7210 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7211 /* 8051 information set by firmware */
7212 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7213 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7214 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7215 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7216 host_msg = (info >>
7217 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7218 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7219
7220 /*
7221 * Handle error flags.
7222 */
7223 if (err & FAILED_LNI) {
7224 /*
7225 * LNI error indications are cleared by the 8051
7226 * only when starting polling. Only pay attention
7227 * to them when in the states that occur during
7228 * LNI.
7229 */
7230 if (ppd->host_link_state
7231 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7232 queue_link_down = 1;
7233 dd_dev_info(dd, "Link error: %s\n",
7234 dc8051_info_err_string(buf,
7235 sizeof(buf),
7236 err & FAILED_LNI));
7237 }
7238 err &= ~(u64)FAILED_LNI;
7239 }
7240 if (err) {
7241 /* report remaining errors, but do not do anything */
7242 dd_dev_err(dd, "8051 info error: %s\n",
7243 dc8051_info_err_string(buf, sizeof(buf), err));
7244 }
7245
7246 /*
7247 * Handle host message flags.
7248 */
7249 if (host_msg & HOST_REQ_DONE) {
7250 /*
7251 * Presently, the driver does a busy wait for
7252 * host requests to complete. This is only an
7253 * informational message.
7254 * NOTE: The 8051 clears the host message
7255 * information *on the next 8051 command*.
7256 * Therefore, when linkup is achieved,
7257 * this flag will still be set.
7258 */
7259 host_msg &= ~(u64)HOST_REQ_DONE;
7260 }
7261 if (host_msg & BC_SMA_MSG) {
7262 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7263 host_msg &= ~(u64)BC_SMA_MSG;
7264 }
7265 if (host_msg & LINKUP_ACHIEVED) {
7266 dd_dev_info(dd, "8051: Link up\n");
7267 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7268 host_msg &= ~(u64)LINKUP_ACHIEVED;
7269 }
7270 if (host_msg & EXT_DEVICE_CFG_REQ) {
7271 handle_8051_request(dd);
7272 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7273 }
7274 if (host_msg & VERIFY_CAP_FRAME) {
7275 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7276 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7277 }
7278 if (host_msg & LINK_GOING_DOWN) {
7279 const char *extra = "";
7280 /* no downgrade action needed if going down */
7281 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7282 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7283 extra = " (ignoring downgrade)";
7284 }
7285 dd_dev_info(dd, "8051: Link down%s\n", extra);
7286 queue_link_down = 1;
7287 host_msg &= ~(u64)LINK_GOING_DOWN;
7288 }
7289 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7290 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7291 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7292 }
7293 if (host_msg) {
7294 /* report remaining messages, but do not do anything */
7295 dd_dev_info(dd, "8051 info host message: %s\n",
7296 dc8051_info_host_msg_string(buf, sizeof(buf),
7297 host_msg));
7298 }
7299
7300 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7301 }
7302 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7303 /*
7304 * Lost the 8051 heartbeat. If this happens, we
7305 * receive constant interrupts about it. Disable
7306 * the interrupt after the first.
7307 */
7308 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7309 write_csr(dd, DC_DC8051_ERR_EN,
7310 read_csr(dd, DC_DC8051_ERR_EN)
7311 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7312
7313 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7314 }
7315 if (reg) {
7316 /* report the error, but do not do anything */
7317 dd_dev_err(dd, "8051 error: %s\n",
7318 dc8051_err_string(buf, sizeof(buf), reg));
7319 }
7320
7321 if (queue_link_down) {
7322 /* if the link is already going down or disabled, do not
7323 * queue another */
7324 if ((ppd->host_link_state
7325 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7326 || ppd->link_enabled == 0) {
7327 dd_dev_info(dd, "%s: not queuing link down\n",
7328 __func__);
7329 } else {
7330 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7331 }
7332 }
7333}
7334
7335static const char * const fm_config_txt[] = {
7336[0] =
7337 "BadHeadDist: Distance violation between two head flits",
7338[1] =
7339 "BadTailDist: Distance violation between two tail flits",
7340[2] =
7341 "BadCtrlDist: Distance violation between two credit control flits",
7342[3] =
7343 "BadCrdAck: Credits return for unsupported VL",
7344[4] =
7345 "UnsupportedVLMarker: Received VL Marker",
7346[5] =
7347 "BadPreempt: Exceeded the preemption nesting level",
7348[6] =
7349 "BadControlFlit: Received unsupported control flit",
7350/* no 7 */
7351[8] =
7352 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7353};
7354
7355static const char * const port_rcv_txt[] = {
7356[1] =
7357 "BadPktLen: Illegal PktLen",
7358[2] =
7359 "PktLenTooLong: Packet longer than PktLen",
7360[3] =
7361 "PktLenTooShort: Packet shorter than PktLen",
7362[4] =
7363 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7364[5] =
7365 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7366[6] =
7367 "BadL2: Illegal L2 opcode",
7368[7] =
7369 "BadSC: Unsupported SC",
7370[9] =
7371 "BadRC: Illegal RC",
7372[11] =
7373 "PreemptError: Preempting with same VL",
7374[12] =
7375 "PreemptVL15: Preempting a VL15 packet",
7376};
7377
7378#define OPA_LDR_FMCONFIG_OFFSET 16
7379#define OPA_LDR_PORTRCV_OFFSET 0
7380static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7381{
7382 u64 info, hdr0, hdr1;
7383 const char *extra;
7384 char buf[96];
7385 struct hfi1_pportdata *ppd = dd->pport;
7386 u8 lcl_reason = 0;
7387 int do_bounce = 0;
7388
7389 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7390 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7391 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7392 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7393 /* set status bit */
7394 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7395 }
7396 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7397 }
7398
7399 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7400 struct hfi1_pportdata *ppd = dd->pport;
7401 /* this counter saturates at (2^32) - 1 */
7402 if (ppd->link_downed < (u32)UINT_MAX)
7403 ppd->link_downed++;
7404 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7405 }
7406
7407 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7408 u8 reason_valid = 1;
7409
7410 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7411 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7412 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7413 /* set status bit */
7414 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7415 }
7416 switch (info) {
7417 case 0:
7418 case 1:
7419 case 2:
7420 case 3:
7421 case 4:
7422 case 5:
7423 case 6:
7424 extra = fm_config_txt[info];
7425 break;
7426 case 8:
7427 extra = fm_config_txt[info];
7428 if (ppd->port_error_action &
7429 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7430 do_bounce = 1;
7431 /*
7432 * lcl_reason cannot be derived from info
7433 * for this error
7434 */
7435 lcl_reason =
7436 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7437 }
7438 break;
7439 default:
7440 reason_valid = 0;
7441 snprintf(buf, sizeof(buf), "reserved%lld", info);
7442 extra = buf;
7443 break;
7444 }
7445
7446 if (reason_valid && !do_bounce) {
7447 do_bounce = ppd->port_error_action &
7448 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7449 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7450 }
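		/*
		 * Example (illustrative): info = 2 (BadCtrlDist) tests
		 * port_error_action bit OPA_LDR_FMCONFIG_OFFSET + 2 = 18 and
		 * reports OPA_LINKDOWN_REASON_BAD_HEAD_DIST + 2 as the local
		 * link-down reason.
		 */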
7451
7452 /* just report this */
7453 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7454 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7455 }
7456
7457 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7458 u8 reason_valid = 1;
7459
7460 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7461 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7462 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7463 if (!(dd->err_info_rcvport.status_and_code &
7464 OPA_EI_STATUS_SMASK)) {
7465 dd->err_info_rcvport.status_and_code =
7466 info & OPA_EI_CODE_SMASK;
7467 /* set status bit */
7468 dd->err_info_rcvport.status_and_code |=
7469 OPA_EI_STATUS_SMASK;
7470 /* save first 2 flits in the packet that caused
7471 * the error */
7472 dd->err_info_rcvport.packet_flit1 = hdr0;
7473 dd->err_info_rcvport.packet_flit2 = hdr1;
7474 }
7475 switch (info) {
7476 case 1:
7477 case 2:
7478 case 3:
7479 case 4:
7480 case 5:
7481 case 6:
7482 case 7:
7483 case 9:
7484 case 11:
7485 case 12:
7486 extra = port_rcv_txt[info];
7487 break;
7488 default:
7489 reason_valid = 0;
7490 snprintf(buf, sizeof(buf), "reserved%lld", info);
7491 extra = buf;
7492 break;
7493 }
7494
7495 if (reason_valid && !do_bounce) {
7496 do_bounce = ppd->port_error_action &
7497 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7498 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7499 }
7500
7501 /* just report this */
7502 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7503 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7504 hdr0, hdr1);
7505
7506 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7507 }
7508
7509 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7510 /* informative only */
7511 dd_dev_info(dd, "8051 access to LCB blocked\n");
7512 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7513 }
7514 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7515 /* informative only */
7516 dd_dev_info(dd, "host access to LCB blocked\n");
7517 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7518 }
7519
7520 /* report any remaining errors */
7521 if (reg)
7522 dd_dev_info(dd, "DCC Error: %s\n",
7523 dcc_err_string(buf, sizeof(buf), reg));
7524
7525 if (lcl_reason == 0)
7526 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7527
7528 if (do_bounce) {
7529 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7530 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7531 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7532 }
7533}
7534
7535static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7536{
7537 char buf[96];
7538
7539 dd_dev_info(dd, "LCB Error: %s\n",
7540 lcb_err_string(buf, sizeof(buf), reg));
7541}
7542
7543/*
7544 * CCE block DC interrupt. Source is < 8.
7545 */
7546static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7547{
7548 const struct err_reg_info *eri = &dc_errs[source];
7549
7550 if (eri->handler) {
7551 interrupt_clear_down(dd, 0, eri);
7552 } else if (source == 3 /* dc_lbm_int */) {
7553 /*
7554 * This indicates that a parity error has occurred on the
7555 * address/control lines presented to the LBM. The error
7556 * is a single pulse, there is no associated error flag,
7557 * and it is non-maskable. This is because if a parity
7558	 * error occurs on the request, the request is dropped.
7559 * This should never occur, but it is nice to know if it
7560 * ever does.
7561 */
7562 dd_dev_err(dd, "Parity error in DC LBM block\n");
7563 } else {
7564 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7565 }
7566}
7567
7568/*
7569 * TX block send credit interrupt. Source is < 160.
7570 */
7571static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7572{
7573 sc_group_release_update(dd, source);
7574}
7575
7576/*
7577 * TX block SDMA interrupt. Source is < 48.
7578 *
7579 * SDMA interrupts are grouped by type:
7580 *
7581 * 0 - N-1 = SDma
7582 * N - 2N-1 = SDmaProgress
7583 * 2N - 3N-1 = SDmaIdle
7584 */
7585static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7586{
7587 /* what interrupt */
7588 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7589 /* which engine */
7590 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
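	/*
	 * Example (illustrative, assuming TXE_NUM_SDMA_ENGINES is 16):
	 * source 17 decodes to what = 1 (SDmaProgress) on engine 1, while
	 * source 2 is a plain SDma interrupt on engine 2.
	 */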
7591
7592#ifdef CONFIG_SDMA_VERBOSITY
7593 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7594 slashstrip(__FILE__), __LINE__, __func__);
7595 sdma_dumpstate(&dd->per_sdma[which]);
7596#endif
7597
7598 if (likely(what < 3 && which < dd->num_sdma)) {
7599 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7600 } else {
7601 /* should not happen */
7602 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7603 }
7604}
7605
7606/*
7607 * RX block receive available interrupt. Source is < 160.
7608 */
7609static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7610{
7611 struct hfi1_ctxtdata *rcd;
7612 char *err_detail;
7613
7614 if (likely(source < dd->num_rcv_contexts)) {
7615 rcd = dd->rcd[source];
7616 if (rcd) {
7617 if (source < dd->first_user_ctxt)
7618				rcd->do_interrupt(rcd, 0);
7619			else
7620 handle_user_interrupt(rcd);
7621 return; /* OK */
7622 }
7623 /* received an interrupt, but no rcd */
7624 err_detail = "dataless";
7625 } else {
7626 /* received an interrupt, but are not using that context */
7627 err_detail = "out of range";
7628 }
7629 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7630 err_detail, source);
7631}
7632
7633/*
7634 * RX block receive urgent interrupt. Source is < 160.
7635 */
7636static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7637{
7638 struct hfi1_ctxtdata *rcd;
7639 char *err_detail;
7640
7641 if (likely(source < dd->num_rcv_contexts)) {
7642 rcd = dd->rcd[source];
7643 if (rcd) {
7644 /* only pay attention to user urgent interrupts */
7645 if (source >= dd->first_user_ctxt)
7646 handle_user_interrupt(rcd);
7647 return; /* OK */
7648 }
7649 /* received an interrupt, but no rcd */
7650 err_detail = "dataless";
7651 } else {
7652 /* received an interrupt, but are not using that context */
7653 err_detail = "out of range";
7654 }
7655 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7656 err_detail, source);
7657}
7658
7659/*
7660 * Reserved range interrupt. Should not be called in normal operation.
7661 */
7662static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7663{
7664 char name[64];
7665
7666 dd_dev_err(dd, "unexpected %s interrupt\n",
7667 is_reserved_name(name, sizeof(name), source));
7668}
7669
7670static const struct is_table is_table[] = {
7671/* start end
7672 name func interrupt func */
7673{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7674 is_misc_err_name, is_misc_err_int },
7675{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7676 is_sdma_eng_err_name, is_sdma_eng_err_int },
7677{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7678 is_sendctxt_err_name, is_sendctxt_err_int },
7679{ IS_SDMA_START, IS_SDMA_END,
7680 is_sdma_eng_name, is_sdma_eng_int },
7681{ IS_VARIOUS_START, IS_VARIOUS_END,
7682 is_various_name, is_various_int },
7683{ IS_DC_START, IS_DC_END,
7684 is_dc_name, is_dc_int },
7685{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7686 is_rcv_avail_name, is_rcv_avail_int },
7687{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7688 is_rcv_urgent_name, is_rcv_urgent_int },
7689{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7690 is_send_credit_name, is_send_credit_int},
7691{ IS_RESERVED_START, IS_RESERVED_END,
7692 is_reserved_name, is_reserved_int},
7693};
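/*
 * Note: is_interrupt() below walks this table in order and dispatches on the
 * first entry whose 'end' exceeds the source, so the rows must stay sorted by
 * ascending, non-overlapping source ranges.
 */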
7694
7695/*
7696 * Interrupt source interrupt - called when the given source has an interrupt.
7697 * Source is a bit index into an array of 64-bit integers.
7698 */
7699static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7700{
7701 const struct is_table *entry;
7702
7703 /* avoids a double compare by walking the table in-order */
7704 for (entry = &is_table[0]; entry->is_name; entry++) {
7705 if (source < entry->end) {
7706 trace_hfi1_interrupt(dd, entry, source);
7707 entry->is_int(dd, source - entry->start);
7708 return;
7709 }
7710 }
7711 /* fell off the end */
7712 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7713}
7714
7715/*
7716 * General interrupt handler. This is able to correctly handle
7717 * all interrupts in case INTx is used.
7718 */
7719static irqreturn_t general_interrupt(int irq, void *data)
7720{
7721 struct hfi1_devdata *dd = data;
7722 u64 regs[CCE_NUM_INT_CSRS];
7723 u32 bit;
7724 int i;
7725
7726 this_cpu_inc(*dd->int_counter);
7727
7728 /* phase 1: scan and clear all handled interrupts */
7729 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7730 if (dd->gi_mask[i] == 0) {
7731 regs[i] = 0; /* used later */
7732 continue;
7733 }
7734 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7735 dd->gi_mask[i];
7736 /* only clear if anything is set */
7737 if (regs[i])
7738 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7739 }
7740
7741 /* phase 2: call the appropriate handler */
7742 for_each_set_bit(bit, (unsigned long *)&regs[0],
7743 CCE_NUM_INT_CSRS*64) {
7744 is_interrupt(dd, bit);
7745 }
7746
7747 return IRQ_HANDLED;
7748}
7749
7750static irqreturn_t sdma_interrupt(int irq, void *data)
7751{
7752 struct sdma_engine *sde = data;
7753 struct hfi1_devdata *dd = sde->dd;
7754 u64 status;
7755
7756#ifdef CONFIG_SDMA_VERBOSITY
7757 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7758 slashstrip(__FILE__), __LINE__, __func__);
7759 sdma_dumpstate(sde);
7760#endif
7761
7762 this_cpu_inc(*dd->int_counter);
7763
7764 /* This read_csr is really bad in the hot path */
7765 status = read_csr(dd,
7766 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7767 & sde->imask;
7768 if (likely(status)) {
7769 /* clear the interrupt(s) */
7770 write_csr(dd,
7771 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7772 status);
7773
7774 /* handle the interrupt(s) */
7775 sdma_engine_interrupt(sde, status);
7776 } else
7777 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7778 sde->this_idx);
7779
7780 return IRQ_HANDLED;
7781}
7782
7783/*
7784 * Clear the receive interrupt, forcing the write and making sure
7785 * we have data from the chip, pushing everything in front of it
7786 * back to the host.
7787 */
7788static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7789{
7790 struct hfi1_devdata *dd = rcd->dd;
7791 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7792
7793 mmiowb(); /* make sure everything before is written */
7794 write_csr(dd, addr, rcd->imask);
7795 /* force the above write on the chip and get a value back */
7796 (void)read_csr(dd, addr);
7797}
7798
7799/* force the receive interrupt */
7800static inline void force_recv_intr(struct hfi1_ctxtdata *rcd)
7801{
7802 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7803}
7804
7805/* return non-zero if a packet is present */
7806static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7807{
7808 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7809 return (rcd->seq_cnt ==
7810 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7811
7812 /* else is RDMA rtail */
7813 return (rcd->head != get_rcvhdrtail(rcd));
7814}
7815
7816/*
7817 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7818 * This routine will try to handle packets immediately (latency), but if
7819 * it finds too many, it will invoke the thread handler (bandwidth). The
7820 * chip receive interrupt is *not* cleared down until this or the thread (if
7821 * invoked) is finished. The intent is to avoid extra interrupts while we
7822 * are processing packets anyway.
7823 */
7824static irqreturn_t receive_context_interrupt(int irq, void *data)
7825{
7826 struct hfi1_ctxtdata *rcd = data;
7827 struct hfi1_devdata *dd = rcd->dd;
7828	int disposition;
7829 int present;
7830
7831 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7832 this_cpu_inc(*dd->int_counter);
7833
7834	/* receive interrupt remains blocked while processing packets */
7835 disposition = rcd->do_interrupt(rcd, 0);
7836
7837	/*
7838 * Too many packets were seen while processing packets in this
7839 * IRQ handler. Invoke the handler thread. The receive interrupt
7840 * remains blocked.
7841 */
7842 if (disposition == RCV_PKT_LIMIT)
7843 return IRQ_WAKE_THREAD;
7844
7845 /*
7846 * The packet processor detected no more packets. Clear the receive
7847	 * interrupt and recheck for a packet that may have arrived
7848 * after the previous check and interrupt clear. If a packet arrived,
7849 * force another interrupt.
7850 */
7851 clear_recv_intr(rcd);
7852 present = check_packet_present(rcd);
7853 if (present)
7854 force_recv_intr(rcd);
7855
7856 return IRQ_HANDLED;
7857}
7858
7859/*
7860 * Receive packet thread handler. This expects to be invoked with the
7861 * receive interrupt still blocked.
7862 */
7863static irqreturn_t receive_context_thread(int irq, void *data)
7864{
7865 struct hfi1_ctxtdata *rcd = data;
7866 int present;
7867
7868 /* receive interrupt is still blocked from the IRQ handler */
7869 (void)rcd->do_interrupt(rcd, 1);
7870
7871 /*
7872 * The packet processor will only return if it detected no more
7873 * packets. Hold IRQs here so we can safely clear the interrupt and
7874 * recheck for a packet that may have arrived after the previous
7875 * check and the interrupt clear. If a packet arrived, force another
7876 * interrupt.
7877 */
7878 local_irq_disable();
7879 clear_recv_intr(rcd);
7880 present = check_packet_present(rcd);
7881 if (present)
7882 force_recv_intr(rcd);
7883 local_irq_enable();
7884
7885 return IRQ_HANDLED;
7886}
7887
7888/* ========================================================================= */
7889
7890u32 read_physical_state(struct hfi1_devdata *dd)
7891{
7892 u64 reg;
7893
7894 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
7895 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
7896 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
7897}
7898
7899static u32 read_logical_state(struct hfi1_devdata *dd)
7900{
7901 u64 reg;
7902
7903 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7904 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
7905 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
7906}
7907
7908static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
7909{
7910 u64 reg;
7911
7912 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7913 /* clear current state, set new state */
7914 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
7915 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
7916 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
7917}
7918
7919/*
7920 * Use the 8051 to read a LCB CSR.
7921 */
7922static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
7923{
7924 u32 regno;
7925 int ret;
7926
7927 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
7928 if (acquire_lcb_access(dd, 0) == 0) {
7929 *data = read_csr(dd, addr);
7930 release_lcb_access(dd, 0);
7931 return 0;
7932 }
7933 return -EBUSY;
7934 }
7935
7936 /* register is an index of LCB registers: (offset - base) / 8 */
7937 regno = (addr - DC_LCB_CFG_RUN) >> 3;
7938 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
7939 if (ret != HCMD_SUCCESS)
7940 return -EBUSY;
7941 return 0;
7942}
7943
7944/*
7945 * Read an LCB CSR. Access may not be in host control, so check.
7946 * Return 0 on success, -EBUSY on failure.
7947 */
7948int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
7949{
7950 struct hfi1_pportdata *ppd = dd->pport;
7951
7952 /* if up, go through the 8051 for the value */
7953 if (ppd->host_link_state & HLS_UP)
7954 return read_lcb_via_8051(dd, addr, data);
7955 /* if going up or down, no access */
7956 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
7957 return -EBUSY;
7958 /* otherwise, host has access */
7959 *data = read_csr(dd, addr);
7960 return 0;
7961}
7962
7963/*
7964 * Use the 8051 to write a LCB CSR.
7965 */
7966static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
7967{
7968	u32 regno;
7969 int ret;
7970
7971	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
7972 (dd->dc8051_ver < dc8051_ver(0, 20))) {
7973 if (acquire_lcb_access(dd, 0) == 0) {
7974 write_csr(dd, addr, data);
7975 release_lcb_access(dd, 0);
7976 return 0;
7977 }
7978 return -EBUSY;
7979	}
7980
7981 /* register is an index of LCB registers: (offset - base) / 8 */
7982 regno = (addr - DC_LCB_CFG_RUN) >> 3;
7983 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
7984 if (ret != HCMD_SUCCESS)
7985 return -EBUSY;
7986 return 0;
7987}
7988
7989/*
7990 * Write an LCB CSR. Access may not be in host control, so check.
7991 * Return 0 on success, -EBUSY on failure.
7992 */
7993int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
7994{
7995 struct hfi1_pportdata *ppd = dd->pport;
7996
7997 /* if up, go through the 8051 for the value */
7998 if (ppd->host_link_state & HLS_UP)
7999 return write_lcb_via_8051(dd, addr, data);
8000 /* if going up or down, no access */
8001 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8002 return -EBUSY;
8003 /* otherwise, host has access */
8004 write_csr(dd, addr, data);
8005 return 0;
8006}
8007
8008/*
8009 * Returns:
8010 * < 0 = Linux error, not able to get access
8011 * > 0 = 8051 command RETURN_CODE
8012 */
8013static int do_8051_command(
8014 struct hfi1_devdata *dd,
8015 u32 type,
8016 u64 in_data,
8017 u64 *out_data)
8018{
8019 u64 reg, completed;
8020 int return_code;
8021 unsigned long flags;
8022 unsigned long timeout;
8023
8024 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8025
8026 /*
8027 * Alternative to holding the lock for a long time:
8028 * - keep busy wait - have other users bounce off
8029 */
8030 spin_lock_irqsave(&dd->dc8051_lock, flags);
8031
8032 /* We can't send any commands to the 8051 if it's in reset */
8033 if (dd->dc_shutdown) {
8034 return_code = -ENODEV;
8035 goto fail;
8036 }
8037
8038 /*
8039 * If an 8051 host command timed out previously, then the 8051 is
8040 * stuck.
8041 *
8042 * On first timeout, attempt to reset and restart the entire DC
8043 * block (including 8051). (Is this too big of a hammer?)
8044 *
8045 * If the 8051 times out a second time, the reset did not bring it
8046 * back to healthy life. In that case, fail any subsequent commands.
8047 */
8048 if (dd->dc8051_timed_out) {
8049 if (dd->dc8051_timed_out > 1) {
8050 dd_dev_err(dd,
8051 "Previous 8051 host command timed out, skipping command %u\n",
8052 type);
8053 return_code = -ENXIO;
8054 goto fail;
8055 }
8056 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8057 dc_shutdown(dd);
8058 dc_start(dd);
8059 spin_lock_irqsave(&dd->dc8051_lock, flags);
8060 }
8061
8062 /*
8063 * If there is no timeout, then the 8051 command interface is
8064 * waiting for a command.
8065 */
8066
8067 /*
8068	 * When writing an LCB CSR, out_data contains the full value to
8069	 * be written, while in_data contains the relative LCB
8070	 * address in 7:0. Do the work here, rather than the caller,
8071	 * of distributing the write data to where it needs to go:
8072 *
8073 * Write data
8074 * 39:00 -> in_data[47:8]
8075 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8076 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8077 */
8078 if (type == HCMD_WRITE_LCB_CSR) {
8079 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8080 reg = ((((*out_data) >> 40) & 0xff) <<
8081 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8082 | ((((*out_data) >> 48) & 0xffff) <<
8083 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8084 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8085 }
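	/*
	 * Example (illustrative): writing the value 0xff to the LCB register
	 * at index 0x10 arrives here as in_data = 0x10, *out_data = 0xff;
	 * in_data becomes 0xff10 (data bits 39:0 above the address) and
	 * DC8051_CFG_EXT_DEV_0 carries data bits 63:40, here all zero.
	 */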
8086
8087 /*
8088	 * Do two writes: the first to stabilize the type and req_data, the
8089 * second to activate.
8090 */
8091 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8092 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8093 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8094 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8095 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8096 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8097 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8098
8099 /* wait for completion, alternate: interrupt */
8100 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8101 while (1) {
8102 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8103 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8104 if (completed)
8105 break;
8106 if (time_after(jiffies, timeout)) {
8107 dd->dc8051_timed_out++;
8108 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8109 if (out_data)
8110 *out_data = 0;
8111 return_code = -ETIMEDOUT;
8112 goto fail;
8113 }
8114 udelay(2);
8115 }
8116
8117 if (out_data) {
8118 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8119 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8120 if (type == HCMD_READ_LCB_CSR) {
8121 /* top 16 bits are in a different register */
8122 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8123 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8124 << (48
8125 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8126 }
8127 }
8128 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8129 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8130 dd->dc8051_timed_out = 0;
8131 /*
8132 * Clear command for next user.
8133 */
8134 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8135
8136fail:
8137 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8138
8139 return return_code;
8140}
8141
8142static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8143{
8144 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8145}
8146
8147static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8148 u8 lane_id, u32 config_data)
8149{
8150 u64 data;
8151 int ret;
8152
8153 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8154 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8155 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8156 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8157 if (ret != HCMD_SUCCESS) {
8158 dd_dev_err(dd,
8159 "load 8051 config: field id %d, lane %d, err %d\n",
8160 (int)field_id, (int)lane_id, ret);
8161 }
8162 return ret;
8163}
8164
8165/*
8166 * Read the 8051 firmware "registers". Use the RAM directly. Always
8167 * set the result, even on error.
8168 * Return 0 on success, -errno on failure
8169 */
8170static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8171 u32 *result)
8172{
8173 u64 big_data;
8174 u32 addr;
8175 int ret;
8176
8177 /* address start depends on the lane_id */
8178 if (lane_id < 4)
8179 addr = (4 * NUM_GENERAL_FIELDS)
8180 + (lane_id * 4 * NUM_LANE_FIELDS);
8181 else
8182 addr = 0;
8183 addr += field_id * 4;
8184
8185 /* read is in 8-byte chunks, hardware will truncate the address down */
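	/*
	 * For example, lane_id 1 with field_id 2 yields
	 * addr = 4 * NUM_GENERAL_FIELDS + 4 * NUM_LANE_FIELDS + 8; bit 2
	 * of addr selects which half of the returned 8 bytes holds the
	 * 4-byte field of interest.
	 */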
8186 ret = read_8051_data(dd, addr, 8, &big_data);
8187
8188 if (ret == 0) {
8189 /* extract the 4 bytes we want */
8190 if (addr & 0x4)
8191 *result = (u32)(big_data >> 32);
8192 else
8193 *result = (u32)big_data;
8194 } else {
8195 *result = 0;
8196 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8197 __func__, lane_id, field_id);
8198 }
8199
8200 return ret;
8201}
8202
8203static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8204 u8 continuous)
8205{
8206 u32 frame;
8207
8208 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8209 | power_management << POWER_MANAGEMENT_SHIFT;
8210 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8211 GENERAL_CONFIG, frame);
8212}
8213
8214static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8215 u16 vl15buf, u8 crc_sizes)
8216{
8217 u32 frame;
8218
8219 frame = (u32)vau << VAU_SHIFT
8220 | (u32)z << Z_SHIFT
8221 | (u32)vcu << VCU_SHIFT
8222 | (u32)vl15buf << VL15BUF_SHIFT
8223 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8224 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8225 GENERAL_CONFIG, frame);
8226}
8227
8228static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8229 u8 *flag_bits, u16 *link_widths)
8230{
8231 u32 frame;
8232
8233 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8234 &frame);
8235 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8236 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8237 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8238}
8239
8240static int write_vc_local_link_width(struct hfi1_devdata *dd,
8241 u8 misc_bits,
8242 u8 flag_bits,
8243 u16 link_widths)
8244{
8245 u32 frame;
8246
8247 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8248 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8249 | (u32)link_widths << LINK_WIDTH_SHIFT;
8250 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8251 frame);
8252}
8253
8254static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8255 u8 device_rev)
8256{
8257 u32 frame;
8258
8259 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8260 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8261 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8262}
8263
8264static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8265 u8 *device_rev)
8266{
8267 u32 frame;
8268
8269 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8270 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8271 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8272 & REMOTE_DEVICE_REV_MASK;
8273}
8274
8275void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8276{
8277 u32 frame;
8278
8279 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8280 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8281 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8282}
8283
8284static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8285 u8 *continuous)
8286{
8287 u32 frame;
8288
8289 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8290 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8291 & POWER_MANAGEMENT_MASK;
8292 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8293 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8294}
8295
8296static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8297 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8298{
8299 u32 frame;
8300
8301 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8302 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8303 *z = (frame >> Z_SHIFT) & Z_MASK;
8304 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8305 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8306 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8307}
8308
8309static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8310 u8 *remote_tx_rate,
8311 u16 *link_widths)
8312{
8313 u32 frame;
8314
8315 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8316 &frame);
8317 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8318 & REMOTE_TX_RATE_MASK;
8319 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8320}
8321
8322static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8323{
8324 u32 frame;
8325
8326 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8327 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8328}
8329
8330static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8331{
8332 u32 frame;
8333
8334 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8335 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8336}
8337
8338static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8339{
8340 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8341}
8342
8343static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8344{
8345 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8346}
8347
8348void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8349{
8350 u32 frame;
8351 int ret;
8352
8353 *link_quality = 0;
8354 if (dd->pport->host_link_state & HLS_UP) {
8355 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8356 &frame);
8357 if (ret == 0)
8358 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8359 & LINK_QUALITY_MASK;
8360 }
8361}
8362
8363static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8364{
8365 u32 frame;
8366
8367 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8368 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8369}
8370
8371static int read_tx_settings(struct hfi1_devdata *dd,
8372 u8 *enable_lane_tx,
8373 u8 *tx_polarity_inversion,
8374 u8 *rx_polarity_inversion,
8375 u8 *max_rate)
8376{
8377 u32 frame;
8378 int ret;
8379
8380 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8381 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8382 & ENABLE_LANE_TX_MASK;
8383 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8384 & TX_POLARITY_INVERSION_MASK;
8385 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8386 & RX_POLARITY_INVERSION_MASK;
8387 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8388 return ret;
8389}
8390
8391static int write_tx_settings(struct hfi1_devdata *dd,
8392 u8 enable_lane_tx,
8393 u8 tx_polarity_inversion,
8394 u8 rx_polarity_inversion,
8395 u8 max_rate)
8396{
8397 u32 frame;
8398
8399 /* no need to mask, all variable sizes match field widths */
8400 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8401 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8402 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8403 | max_rate << MAX_RATE_SHIFT;
8404 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8405}
8406
8407static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8408{
8409 u32 frame, version, prod_id;
8410 int ret, lane;
8411
8412 /* 4 lanes */
8413 for (lane = 0; lane < 4; lane++) {
8414 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8415 if (ret) {
8416 dd_dev_err(
8417 dd,
8418 "Unable to read lane %d firmware details\n",
8419 lane);
8420 continue;
8421 }
8422 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8423 & SPICO_ROM_VERSION_MASK;
8424 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8425 & SPICO_ROM_PROD_ID_MASK;
8426 dd_dev_info(dd,
8427 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8428 lane, version, prod_id);
8429 }
8430}
8431
8432/*
8433 * Read an idle LCB message.
8434 *
8435 * Returns 0 on success, -EINVAL on error
8436 */
8437static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8438{
8439 int ret;
8440
8441 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8442 type, data_out);
8443 if (ret != HCMD_SUCCESS) {
8444 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8445 (u32)type, ret);
8446 return -EINVAL;
8447 }
8448 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8449 /* return only the payload as we already know the type */
8450 *data_out >>= IDLE_PAYLOAD_SHIFT;
8451 return 0;
8452}
8453
8454/*
8455 * Read an idle SMA message. To be done in response to a notification from
8456 * the 8051.
8457 *
8458 * Returns 0 on success, -EINVAL on error
8459 */
8460static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8461{
8462 return read_idle_message(dd,
8463 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8464}
8465
8466/*
8467 * Send an idle LCB message.
8468 *
8469 * Returns 0 on success, -EINVAL on error
8470 */
8471static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8472{
8473 int ret;
8474
8475 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8476 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8477 if (ret != HCMD_SUCCESS) {
8478 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8479 data, ret);
8480 return -EINVAL;
8481 }
8482 return 0;
8483}
8484
8485/*
8486 * Send an idle SMA message.
8487 *
8488 * Returns 0 on success, -EINVAL on error
8489 */
8490int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8491{
8492 u64 data;
8493
8494 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8495 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8496 return send_idle_message(dd, data);
8497}
8498
8499/*
8500 * Initialize the LCB then do a quick link up. This may or may not be
8501 * in loopback.
8502 *
8503 * return 0 on success, -errno on error
8504 */
8505static int do_quick_linkup(struct hfi1_devdata *dd)
8506{
8507 u64 reg;
8508 unsigned long timeout;
8509 int ret;
8510
8511 lcb_shutdown(dd, 0);
8512
8513 if (loopback) {
8514 /* LCB_CFG_LOOPBACK.VAL = 2 */
8515 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8516 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8517 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8518 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8519 }
8520
8521 /* start the LCBs */
8522 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8523 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8524
8525 /* simulator only loopback steps */
8526 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8527 /* LCB_CFG_RUN.EN = 1 */
8528 write_csr(dd, DC_LCB_CFG_RUN,
8529 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8530
8531 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8532 timeout = jiffies + msecs_to_jiffies(10);
8533 while (1) {
8534 reg = read_csr(dd,
8535 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8536 if (reg)
8537 break;
8538 if (time_after(jiffies, timeout)) {
8539 dd_dev_err(dd,
8540 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8541 return -ETIMEDOUT;
8542 }
8543 udelay(2);
8544 }
8545
8546 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8547 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8548 }
8549
8550 if (!loopback) {
8551 /*
8552 * When doing quick linkup and not in loopback, both
8553 * sides must be done with LCB set-up before either
8554 * starts the quick linkup. Put a delay here so that
8555 * both sides can be started and have a chance to be
8556 * done with LCB set up before resuming.
8557 */
8558 dd_dev_err(dd,
8559 "Pausing for peer to be finished with LCB set up\n");
8560 msleep(5000);
8561 dd_dev_err(dd,
8562 "Continuing with quick linkup\n");
8563 }
8564
8565 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8566 set_8051_lcb_access(dd);
8567
8568 /*
8569 * State "quick" LinkUp request sets the physical link state to
8570 * LinkUp without a verify capability sequence.
8571 * This state is in simulator v37 and later.
8572 */
8573 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8574 if (ret != HCMD_SUCCESS) {
8575 dd_dev_err(dd,
8576 "%s: set physical link state to quick LinkUp failed with return %d\n",
8577 __func__, ret);
8578
8579 set_host_lcb_access(dd);
8580 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8581
8582 if (ret >= 0)
8583 ret = -EINVAL;
8584 return ret;
8585 }
8586
8587 return 0; /* success */
8588}
8589
8590/*
8591 * Set the SerDes to internal loopback mode.
8592 * Returns 0 on success, -errno on error.
8593 */
8594static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8595{
8596 int ret;
8597
8598 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8599 if (ret == HCMD_SUCCESS)
8600 return 0;
8601 dd_dev_err(dd,
8602 "Set physical link state to SerDes Loopback failed with return %d\n",
8603 ret);
8604 if (ret >= 0)
8605 ret = -EINVAL;
8606 return ret;
8607}
8608
8609/*
8610 * Do all special steps to set up loopback.
8611 */
8612static int init_loopback(struct hfi1_devdata *dd)
8613{
8614 dd_dev_info(dd, "Entering loopback mode\n");
8615
8616 /* all loopbacks should disable self GUID check */
8617 write_csr(dd, DC_DC8051_CFG_MODE,
8618 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8619
8620 /*
8621 * The simulator has only one loopback option - LCB. Switch
8622 * to that option, which includes quick link up.
8623 *
8624 * Accept all valid loopback values.
8625 */
8626 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8627 && (loopback == LOOPBACK_SERDES
8628 || loopback == LOOPBACK_LCB
8629 || loopback == LOOPBACK_CABLE)) {
8630 loopback = LOOPBACK_LCB;
8631 quick_linkup = 1;
8632 return 0;
8633 }
8634
8635 /* handle serdes loopback */
8636 if (loopback == LOOPBACK_SERDES) {
 8637		/* internal serdes loopback needs quick linkup on RTL */
8638 if (dd->icode == ICODE_RTL_SILICON)
8639 quick_linkup = 1;
8640 return set_serdes_loopback_mode(dd);
8641 }
8642
8643 /* LCB loopback - handled at poll time */
8644 if (loopback == LOOPBACK_LCB) {
8645 quick_linkup = 1; /* LCB is always quick linkup */
8646
8647 /* not supported in emulation due to emulation RTL changes */
8648 if (dd->icode == ICODE_FPGA_EMULATION) {
8649 dd_dev_err(dd,
8650 "LCB loopback not supported in emulation\n");
8651 return -EINVAL;
8652 }
8653 return 0;
8654 }
8655
8656 /* external cable loopback requires no extra steps */
8657 if (loopback == LOOPBACK_CABLE)
8658 return 0;
8659
8660 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8661 return -EINVAL;
8662}
8663
8664/*
8665 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8666 * used in the Verify Capability link width attribute.
8667 */
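/*
 * For example, an FM value of OPA_LINK_WIDTH_4X | OPA_LINK_WIDTH_1X
 * translates to 0x9: bit 3 for 4X plus bit 0 for 1X.
 */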
8668static u16 opa_to_vc_link_widths(u16 opa_widths)
8669{
8670 int i;
8671 u16 result = 0;
8672
8673 static const struct link_bits {
8674 u16 from;
8675 u16 to;
8676 } opa_link_xlate[] = {
8677 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8678 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8679 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8680 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8681 };
8682
8683 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8684 if (opa_widths & opa_link_xlate[i].from)
8685 result |= opa_link_xlate[i].to;
8686 }
8687 return result;
8688}
8689
8690/*
8691 * Set link attributes before moving to polling.
8692 */
8693static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8694{
8695 struct hfi1_devdata *dd = ppd->dd;
8696 u8 enable_lane_tx;
8697 u8 tx_polarity_inversion;
8698 u8 rx_polarity_inversion;
8699 int ret;
8700
8701 /* reset our fabric serdes to clear any lingering problems */
8702 fabric_serdes_reset(dd);
8703
8704 /* set the local tx rate - need to read-modify-write */
8705 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8706 &rx_polarity_inversion, &ppd->local_tx_rate);
8707 if (ret)
8708 goto set_local_link_attributes_fail;
8709
8710 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8711 /* set the tx rate to the fastest enabled */
8712 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8713 ppd->local_tx_rate = 1;
8714 else
8715 ppd->local_tx_rate = 0;
8716 } else {
8717 /* set the tx rate to all enabled */
8718 ppd->local_tx_rate = 0;
8719 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8720 ppd->local_tx_rate |= 2;
8721 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8722 ppd->local_tx_rate |= 1;
8723 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008724
8725 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008726 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8727 rx_polarity_inversion, ppd->local_tx_rate);
8728 if (ret != HCMD_SUCCESS)
8729 goto set_local_link_attributes_fail;
8730
8731 /*
8732 * DC supports continuous updates.
8733 */
8734 ret = write_vc_local_phy(dd, 0 /* no power management */,
8735 1 /* continuous updates */);
8736 if (ret != HCMD_SUCCESS)
8737 goto set_local_link_attributes_fail;
8738
8739 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8740 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8741 ppd->port_crc_mode_enabled);
8742 if (ret != HCMD_SUCCESS)
8743 goto set_local_link_attributes_fail;
8744
8745 ret = write_vc_local_link_width(dd, 0, 0,
8746 opa_to_vc_link_widths(ppd->link_width_enabled));
8747 if (ret != HCMD_SUCCESS)
8748 goto set_local_link_attributes_fail;
8749
8750 /* let peer know who we are */
8751 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8752 if (ret == HCMD_SUCCESS)
8753 return 0;
8754
8755set_local_link_attributes_fail:
8756 dd_dev_err(dd,
8757 "Failed to set local link attributes, return 0x%x\n",
8758 ret);
8759 return ret;
8760}
8761
8762/*
 8763 * Call this to start the link.  Schedule a retry if the cable is not
 8764 * present or if unable to start polling.  Do nothing if the link is
 8765 * disabled.  Returns 0 if the link is disabled or moved to polling.
 8766 */
8767int start_link(struct hfi1_pportdata *ppd)
8768{
8769 if (!ppd->link_enabled) {
8770 dd_dev_info(ppd->dd,
8771 "%s: stopping link start because link is disabled\n",
8772 __func__);
8773 return 0;
8774 }
8775 if (!ppd->driver_link_ready) {
8776 dd_dev_info(ppd->dd,
8777 "%s: stopping link start because driver is not ready\n",
8778 __func__);
8779 return 0;
8780 }
8781
8782 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8783 loopback == LOOPBACK_LCB ||
8784 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8785 return set_link_state(ppd, HLS_DN_POLL);
8786
8787 dd_dev_info(ppd->dd,
8788 "%s: stopping link start because no cable is present\n",
8789 __func__);
8790 return -EAGAIN;
8791}
8792
8793static void reset_qsfp(struct hfi1_pportdata *ppd)
8794{
8795 struct hfi1_devdata *dd = ppd->dd;
8796 u64 mask, qsfp_mask;
8797
8798 mask = (u64)QSFP_HFI0_RESET_N;
8799 qsfp_mask = read_csr(dd,
8800 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
8801 qsfp_mask |= mask;
8802 write_csr(dd,
8803 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
8804 qsfp_mask);
8805
8806 qsfp_mask = read_csr(dd,
8807 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
8808 qsfp_mask &= ~mask;
8809 write_csr(dd,
8810 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8811 qsfp_mask);
8812
8813 udelay(10);
8814
8815 qsfp_mask |= mask;
8816 write_csr(dd,
8817 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8818 qsfp_mask);
8819}
8820
8821static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8822 u8 *qsfp_interrupt_status)
8823{
8824 struct hfi1_devdata *dd = ppd->dd;
8825
8826 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8827 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8828 dd_dev_info(dd,
 8829			"%s: QSFP cable temperature too high\n",
8830 __func__);
8831
8832 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8833 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8834 dd_dev_info(dd,
8835 "%s: QSFP cable temperature too low\n",
8836 __func__);
8837
8838 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8839 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8840 dd_dev_info(dd,
8841 "%s: QSFP supply voltage too high\n",
8842 __func__);
8843
8844 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8845 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8846 dd_dev_info(dd,
8847 "%s: QSFP supply voltage too low\n",
8848 __func__);
8849
8850 /* Byte 2 is vendor specific */
8851
8852 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
8853 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
8854 dd_dev_info(dd,
8855 "%s: Cable RX channel 1/2 power too high\n",
8856 __func__);
8857
8858 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
8859 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
8860 dd_dev_info(dd,
8861 "%s: Cable RX channel 1/2 power too low\n",
8862 __func__);
8863
8864 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
8865 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
8866 dd_dev_info(dd,
8867 "%s: Cable RX channel 3/4 power too high\n",
8868 __func__);
8869
8870 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
8871 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
8872 dd_dev_info(dd,
8873 "%s: Cable RX channel 3/4 power too low\n",
8874 __func__);
8875
8876 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
8877 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
8878 dd_dev_info(dd,
8879 "%s: Cable TX channel 1/2 bias too high\n",
8880 __func__);
8881
8882 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
8883 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
8884 dd_dev_info(dd,
8885 "%s: Cable TX channel 1/2 bias too low\n",
8886 __func__);
8887
8888 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
8889 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
8890 dd_dev_info(dd,
8891 "%s: Cable TX channel 3/4 bias too high\n",
8892 __func__);
8893
8894 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
8895 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
8896 dd_dev_info(dd,
8897 "%s: Cable TX channel 3/4 bias too low\n",
8898 __func__);
8899
8900 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
8901 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
8902 dd_dev_info(dd,
8903 "%s: Cable TX channel 1/2 power too high\n",
8904 __func__);
8905
8906 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
8907 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
8908 dd_dev_info(dd,
8909 "%s: Cable TX channel 1/2 power too low\n",
8910 __func__);
8911
8912 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
8913 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
8914 dd_dev_info(dd,
8915 "%s: Cable TX channel 3/4 power too high\n",
8916 __func__);
8917
8918 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
8919 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
8920 dd_dev_info(dd,
8921 "%s: Cable TX channel 3/4 power too low\n",
8922 __func__);
8923
8924 /* Bytes 9-10 and 11-12 are reserved */
8925 /* Bytes 13-15 are vendor specific */
8926
8927 return 0;
8928}
8929
8930static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
8931{
8932 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
8933
8934 return 0;
8935}
8936
8937static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
8938{
8939 struct hfi1_devdata *dd = ppd->dd;
8940 u8 qsfp_interrupt_status = 0;
8941
8942 if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
8943 != 1) {
8944 dd_dev_info(dd,
8945 "%s: Failed to read status of QSFP module\n",
8946 __func__);
8947 return -EIO;
8948 }
8949
8950 /* We don't care about alarms & warnings with a non-functional INT_N */
8951 if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
8952 do_pre_lni_host_behaviors(ppd);
8953
8954 return 0;
8955}
8956
8957/* This routine will only be scheduled if the QSFP module is present */
8958static void qsfp_event(struct work_struct *work)
8959{
8960 struct qsfp_data *qd;
8961 struct hfi1_pportdata *ppd;
8962 struct hfi1_devdata *dd;
8963
8964 qd = container_of(work, struct qsfp_data, qsfp_work);
8965 ppd = qd->ppd;
8966 dd = ppd->dd;
8967
8968 /* Sanity check */
8969 if (!qsfp_mod_present(ppd))
8970 return;
8971
8972 /*
 8973	 * Turn DC back on after the cable has been
8974 * re-inserted. Up until now, the DC has been in
8975 * reset to save power.
8976 */
8977 dc_start(dd);
8978
8979 if (qd->cache_refresh_required) {
8980 msleep(3000);
8981 reset_qsfp(ppd);
8982
8983 /* Check for QSFP interrupt after t_init (SFF 8679)
8984 * + extra
8985 */
8986 msleep(3000);
8987 if (!qd->qsfp_interrupt_functional) {
8988 if (do_qsfp_intr_fallback(ppd) < 0)
8989 dd_dev_info(dd, "%s: QSFP fallback failed\n",
8990 __func__);
8991 ppd->driver_link_ready = 1;
8992 start_link(ppd);
8993 }
8994 }
8995
8996 if (qd->check_interrupt_flags) {
8997 u8 qsfp_interrupt_status[16] = {0,};
8998
8999 if (qsfp_read(ppd, dd->hfi1_id, 6,
9000 &qsfp_interrupt_status[0], 16) != 16) {
9001 dd_dev_info(dd,
9002 "%s: Failed to read status of QSFP module\n",
9003 __func__);
9004 } else {
9005 unsigned long flags;
9006 u8 data_status;
9007
9008 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9009 ppd->qsfp_info.check_interrupt_flags = 0;
9010 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9011 flags);
9012
9013 if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
9014 != 1) {
9015 dd_dev_info(dd,
9016 "%s: Failed to read status of QSFP module\n",
9017 __func__);
9018 }
9019 if (!(data_status & QSFP_DATA_NOT_READY)) {
9020 do_pre_lni_host_behaviors(ppd);
9021 start_link(ppd);
9022 } else
9023 handle_qsfp_error_conditions(ppd,
9024 qsfp_interrupt_status);
9025 }
9026 }
9027}
9028
9029void init_qsfp(struct hfi1_pportdata *ppd)
9030{
9031 struct hfi1_devdata *dd = ppd->dd;
9032 u64 qsfp_mask;
9033
9034 if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
Easwar Hariharan3c2f85b2015-10-26 10:28:31 -04009035 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009036 ppd->driver_link_ready = 1;
9037 return;
9038 }
9039
9040 ppd->qsfp_info.ppd = ppd;
9041 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
9042
9043 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9044 /* Clear current status to avoid spurious interrupts */
9045 write_csr(dd,
9046 dd->hfi1_id ?
9047 ASIC_QSFP2_CLEAR :
9048 ASIC_QSFP1_CLEAR,
9049 qsfp_mask);
9050
9051 /* Handle active low nature of INT_N and MODPRST_N pins */
9052 if (qsfp_mod_present(ppd))
9053 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9054 write_csr(dd,
9055 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9056 qsfp_mask);
9057
9058 /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
9059 qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
9060 write_csr(dd,
9061 dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9062 qsfp_mask);
9063
9064 if (qsfp_mod_present(ppd)) {
9065 msleep(3000);
9066 reset_qsfp(ppd);
9067
9068 /* Check for QSFP interrupt after t_init (SFF 8679)
9069 * + extra
9070 */
9071 msleep(3000);
9072 if (!ppd->qsfp_info.qsfp_interrupt_functional) {
9073 if (do_qsfp_intr_fallback(ppd) < 0)
9074 dd_dev_info(dd,
9075 "%s: QSFP fallback failed\n",
9076 __func__);
9077 ppd->driver_link_ready = 1;
9078 }
9079 }
9080}
9081
Dean Luickbbdeb332015-12-01 15:38:15 -05009082/*
9083 * Do a one-time initialize of the LCB block.
9084 */
9085static void init_lcb(struct hfi1_devdata *dd)
9086{
9087 /* the DC has been reset earlier in the driver load */
9088
9089 /* set LCB for cclk loopback on the port */
9090 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9091 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9092 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9093 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9094 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9095 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9096 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9097}
9098
Mike Marciniszyn77241052015-07-30 15:17:43 -04009099int bringup_serdes(struct hfi1_pportdata *ppd)
9100{
9101 struct hfi1_devdata *dd = ppd->dd;
9102 u64 guid;
9103 int ret;
9104
9105 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9106 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9107
9108 guid = ppd->guid;
9109 if (!guid) {
9110 if (dd->base_guid)
9111 guid = dd->base_guid + ppd->port - 1;
9112 ppd->guid = guid;
9113 }
9114
9115 /* the link defaults to enabled */
9116 ppd->link_enabled = 1;
9117 /* Set linkinit_reason on power up per OPA spec */
9118 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9119
Dean Luickbbdeb332015-12-01 15:38:15 -05009120 /* one-time init of the LCB */
9121 init_lcb(dd);
9122
Mike Marciniszyn77241052015-07-30 15:17:43 -04009123 if (loopback) {
9124 ret = init_loopback(dd);
9125 if (ret < 0)
9126 return ret;
9127 }
9128
9129 return start_link(ppd);
9130}
9131
9132void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9133{
9134 struct hfi1_devdata *dd = ppd->dd;
9135
9136 /*
 9137	 * Shut down the link and keep it down.  First, clear the indication
 9138	 * that the driver wants the link to be up (driver_link_ready).
9139 * Then make sure the link is not automatically restarted
9140 * (link_enabled). Cancel any pending restart. And finally
9141 * go offline.
9142 */
9143 ppd->driver_link_ready = 0;
9144 ppd->link_enabled = 0;
9145
9146 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9147 OPA_LINKDOWN_REASON_SMA_DISABLED);
9148 set_link_state(ppd, HLS_DN_OFFLINE);
9149
9150 /* disable the port */
9151 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9152}
9153
9154static inline int init_cpu_counters(struct hfi1_devdata *dd)
9155{
9156 struct hfi1_pportdata *ppd;
9157 int i;
9158
9159 ppd = (struct hfi1_pportdata *)(dd + 1);
9160 for (i = 0; i < dd->num_pports; i++, ppd++) {
9161 ppd->ibport_data.rc_acks = NULL;
9162 ppd->ibport_data.rc_qacks = NULL;
9163 ppd->ibport_data.rc_acks = alloc_percpu(u64);
9164 ppd->ibport_data.rc_qacks = alloc_percpu(u64);
9165 ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
9166 if ((ppd->ibport_data.rc_acks == NULL) ||
9167 (ppd->ibport_data.rc_delayed_comp == NULL) ||
9168 (ppd->ibport_data.rc_qacks == NULL))
9169 return -ENOMEM;
9170 }
9171
9172 return 0;
9173}
9174
9175static const char * const pt_names[] = {
9176 "expected",
9177 "eager",
9178 "invalid"
9179};
9180
9181static const char *pt_name(u32 type)
9182{
9183 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9184}
9185
9186/*
9187 * index is the index into the receive array
9188 */
9189void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9190 u32 type, unsigned long pa, u16 order)
9191{
9192 u64 reg;
9193 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9194 (dd->kregbase + RCV_ARRAY));
9195
9196 if (!(dd->flags & HFI1_PRESENT))
9197 goto done;
9198
9199 if (type == PT_INVALID) {
9200 pa = 0;
9201 } else if (type > PT_INVALID) {
9202 dd_dev_err(dd,
9203 "unexpected receive array type %u for index %u, not handled\n",
9204 type, index);
9205 goto done;
9206 }
9207
9208 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9209 pt_name(type), index, pa, (unsigned long)order);
9210
9211#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
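	/*
	 * For example, a buffer at physical address 0x123456000 is
	 * programmed as 0x123456 in the RT_ADDR field (pa >> RT_ADDR_SHIFT).
	 */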
9212 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9213 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9214 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9215 << RCV_ARRAY_RT_ADDR_SHIFT;
9216 writeq(reg, base + (index * 8));
9217
9218 if (type == PT_EAGER)
9219 /*
9220 * Eager entries are written one-by-one so we have to push them
9221 * after we write the entry.
9222 */
9223 flush_wc();
9224done:
9225 return;
9226}
9227
9228void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9229{
9230 struct hfi1_devdata *dd = rcd->dd;
9231 u32 i;
9232
9233 /* this could be optimized */
9234 for (i = rcd->eager_base; i < rcd->eager_base +
9235 rcd->egrbufs.alloced; i++)
9236 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9237
9238 for (i = rcd->expected_base;
9239 i < rcd->expected_base + rcd->expected_count; i++)
9240 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9241}
9242
9243int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9244 struct hfi1_ctxt_info *kinfo)
9245{
9246 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9247 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9248 return 0;
9249}
9250
9251struct hfi1_message_header *hfi1_get_msgheader(
9252 struct hfi1_devdata *dd, __le32 *rhf_addr)
9253{
9254 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9255
9256 return (struct hfi1_message_header *)
9257 (rhf_addr - dd->rhf_offset + offset);
9258}
9259
9260static const char * const ib_cfg_name_strings[] = {
9261 "HFI1_IB_CFG_LIDLMC",
9262 "HFI1_IB_CFG_LWID_DG_ENB",
9263 "HFI1_IB_CFG_LWID_ENB",
9264 "HFI1_IB_CFG_LWID",
9265 "HFI1_IB_CFG_SPD_ENB",
9266 "HFI1_IB_CFG_SPD",
9267 "HFI1_IB_CFG_RXPOL_ENB",
9268 "HFI1_IB_CFG_LREV_ENB",
9269 "HFI1_IB_CFG_LINKLATENCY",
9270 "HFI1_IB_CFG_HRTBT",
9271 "HFI1_IB_CFG_OP_VLS",
9272 "HFI1_IB_CFG_VL_HIGH_CAP",
9273 "HFI1_IB_CFG_VL_LOW_CAP",
9274 "HFI1_IB_CFG_OVERRUN_THRESH",
9275 "HFI1_IB_CFG_PHYERR_THRESH",
9276 "HFI1_IB_CFG_LINKDEFAULT",
9277 "HFI1_IB_CFG_PKEYS",
9278 "HFI1_IB_CFG_MTU",
9279 "HFI1_IB_CFG_LSTATE",
9280 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9281 "HFI1_IB_CFG_PMA_TICKS",
9282 "HFI1_IB_CFG_PORT"
9283};
9284
9285static const char *ib_cfg_name(int which)
9286{
9287 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9288 return "invalid";
9289 return ib_cfg_name_strings[which];
9290}
9291
9292int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9293{
9294 struct hfi1_devdata *dd = ppd->dd;
9295 int val = 0;
9296
9297 switch (which) {
9298 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9299 val = ppd->link_width_enabled;
9300 break;
9301 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9302 val = ppd->link_width_active;
9303 break;
9304 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9305 val = ppd->link_speed_enabled;
9306 break;
9307 case HFI1_IB_CFG_SPD: /* current Link speed */
9308 val = ppd->link_speed_active;
9309 break;
9310
9311 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9312 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9313 case HFI1_IB_CFG_LINKLATENCY:
9314 goto unimplemented;
9315
9316 case HFI1_IB_CFG_OP_VLS:
9317 val = ppd->vls_operational;
9318 break;
9319 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9320 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9321 break;
9322 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9323 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9324 break;
9325 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9326 val = ppd->overrun_threshold;
9327 break;
9328 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9329 val = ppd->phy_error_threshold;
9330 break;
9331 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9332 val = dd->link_default;
9333 break;
9334
9335 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9336 case HFI1_IB_CFG_PMA_TICKS:
9337 default:
9338unimplemented:
9339 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9340 dd_dev_info(
9341 dd,
9342 "%s: which %s: not implemented\n",
9343 __func__,
9344 ib_cfg_name(which));
9345 break;
9346 }
9347
9348 return val;
9349}
9350
9351/*
9352 * The largest MAD packet size.
9353 */
9354#define MAX_MAD_PACKET 2048
9355
9356/*
9357 * Return the maximum header bytes that can go on the _wire_
9358 * for this device. This count includes the ICRC which is
9359 * not part of the packet held in memory but it is appended
9360 * by the HW.
9361 * This is dependent on the device's receive header entry size.
9362 * HFI allows this to be set per-receive context, but the
9363 * driver presently enforces a global value.
9364 */
9365u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9366{
9367 /*
9368 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9369 * the Receive Header Entry Size minus the PBC (or RHF) size
9370 * plus one DW for the ICRC appended by HW.
9371 *
9372 * dd->rcd[0].rcvhdrqentsize is in DW.
 9373	 * We use rcd[0] as all contexts will have the same value.  Also,
9374 * the first kernel context would have been allocated by now so
9375 * we are guaranteed a valid value.
9376 */
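	/*
	 * For example, a receive header entry size of 32 DWs gives
	 * (32 - 2 + 1) * 4 = 124 bytes of maximum on-the-wire header.
	 */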
9377 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9378}
9379
9380/*
9381 * Set Send Length
9382 * @ppd - per port data
9383 *
9384 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9385 * registers compare against LRH.PktLen, so use the max bytes included
9386 * in the LRH.
9387 *
9388 * This routine changes all VL values except VL15, which it maintains at
9389 * the same value.
9390 */
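/*
 * The per-VL limits are packed four to a register: VLs 0-3 go into
 * SEND_LEN_CHECK0 and VLs 4-7 (plus VL15) into SEND_LEN_CHECK1, each
 * expressed in DWs as (MTU + maximum header bytes) >> 2.
 */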
9391static void set_send_length(struct hfi1_pportdata *ppd)
9392{
9393 struct hfi1_devdata *dd = ppd->dd;
9394 u32 max_hb = lrh_max_header_bytes(dd), maxvlmtu = 0, dcmtu;
9395 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9396 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9397 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9398 int i;
9399
9400 for (i = 0; i < ppd->vls_supported; i++) {
9401 if (dd->vld[i].mtu > maxvlmtu)
9402 maxvlmtu = dd->vld[i].mtu;
9403 if (i <= 3)
9404 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9405 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9406 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9407 else
9408 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9409 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9410 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9411 }
9412 write_csr(dd, SEND_LEN_CHECK0, len1);
9413 write_csr(dd, SEND_LEN_CHECK1, len2);
9414 /* adjust kernel credit return thresholds based on new MTUs */
9415 /* all kernel receive contexts have the same hdrqentsize */
9416 for (i = 0; i < ppd->vls_supported; i++) {
9417 sc_set_cr_threshold(dd->vld[i].sc,
9418 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9419 dd->rcd[0]->rcvhdrqentsize));
9420 }
9421 sc_set_cr_threshold(dd->vld[15].sc,
9422 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9423 dd->rcd[0]->rcvhdrqentsize));
9424
9425 /* Adjust maximum MTU for the port in DC */
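	/*
	 * For example, a maximum VL MTU of 4096 encodes as
	 * ilog2(4096 >> 8) + 1 = 5, and 8192 encodes as 6; 10240 has its
	 * own dedicated encoding.
	 */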
9426 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9427 (ilog2(maxvlmtu >> 8) + 1);
9428 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9429 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9430 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9431 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9432 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9433}
9434
9435static void set_lidlmc(struct hfi1_pportdata *ppd)
9436{
9437 int i;
9438 u64 sreg = 0;
9439 struct hfi1_devdata *dd = ppd->dd;
9440 u32 mask = ~((1U << ppd->lmc) - 1);
9441 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9442
9443 if (dd->hfi1_snoop.mode_flag)
9444 dd_dev_info(dd, "Set lid/lmc while snooping");
9445
9446 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9447 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9448 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9449 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9450 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9451 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9452 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9453
9454 /*
9455 * Iterate over all the send contexts and set their SLID check
9456 */
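	/*
	 * For example, with an LMC of 2 the mask computed above is ~0x3,
	 * so the low two bits of the LID are ignored by the SLID check.
	 */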
9457 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9458 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9459 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9460 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9461
9462 for (i = 0; i < dd->chip_send_contexts; i++) {
9463 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9464 i, (u32)sreg);
9465 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9466 }
9467
9468 /* Now we have to do the same thing for the sdma engines */
9469 sdma_update_lmc(dd, mask, ppd->lid);
9470}
9471
9472static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9473{
9474 unsigned long timeout;
9475 u32 curr_state;
9476
9477 timeout = jiffies + msecs_to_jiffies(msecs);
9478 while (1) {
9479 curr_state = read_physical_state(dd);
9480 if (curr_state == state)
9481 break;
9482 if (time_after(jiffies, timeout)) {
9483 dd_dev_err(dd,
9484 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9485 state, curr_state);
9486 return -ETIMEDOUT;
9487 }
9488 usleep_range(1950, 2050); /* sleep 2ms-ish */
9489 }
9490
9491 return 0;
9492}
9493
9494/*
9495 * Helper for set_link_state(). Do not call except from that routine.
9496 * Expects ppd->hls_mutex to be held.
9497 *
9498 * @rem_reason value to be sent to the neighbor
9499 *
9500 * LinkDownReasons only set if transition succeeds.
9501 */
9502static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9503{
9504 struct hfi1_devdata *dd = ppd->dd;
9505 u32 pstate, previous_state;
9506 u32 last_local_state;
9507 u32 last_remote_state;
9508 int ret;
9509 int do_transition;
9510 int do_wait;
9511
9512 previous_state = ppd->host_link_state;
9513 ppd->host_link_state = HLS_GOING_OFFLINE;
9514 pstate = read_physical_state(dd);
9515 if (pstate == PLS_OFFLINE) {
9516 do_transition = 0; /* in right state */
9517 do_wait = 0; /* ...no need to wait */
9518 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9519 do_transition = 0; /* in an offline transient state */
9520 do_wait = 1; /* ...wait for it to settle */
9521 } else {
9522 do_transition = 1; /* need to move to offline */
9523 do_wait = 1; /* ...will need to wait */
9524 }
9525
9526 if (do_transition) {
9527 ret = set_physical_link_state(dd,
9528 PLS_OFFLINE | (rem_reason << 8));
9529
9530 if (ret != HCMD_SUCCESS) {
9531 dd_dev_err(dd,
9532 "Failed to transition to Offline link state, return %d\n",
9533 ret);
9534 return -EINVAL;
9535 }
9536 if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
9537 ppd->offline_disabled_reason =
9538 OPA_LINKDOWN_REASON_TRANSIENT;
9539 }
9540
9541 if (do_wait) {
9542 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009543 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009544 if (ret < 0)
9545 return ret;
9546 }
9547
9548 /* make sure the logical state is also down */
9549 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9550
9551 /*
9552 * Now in charge of LCB - must be after the physical state is
9553 * offline.quiet and before host_link_state is changed.
9554 */
9555 set_host_lcb_access(dd);
9556 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9557 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9558
9559 /*
9560 * The LNI has a mandatory wait time after the physical state
9561 * moves to Offline.Quiet. The wait time may be different
9562 * depending on how the link went down. The 8051 firmware
9563 * will observe the needed wait time and only move to ready
9564 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009565 * is 6s, so wait that long and then at least 0.5s more for
9566 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009567 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009568 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009569 if (ret) {
9570 dd_dev_err(dd,
9571 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9572 /* state is really offline, so make it so */
9573 ppd->host_link_state = HLS_DN_OFFLINE;
9574 return ret;
9575 }
9576
9577 /*
9578 * The state is now offline and the 8051 is ready to accept host
9579 * requests.
9580 * - change our state
9581 * - notify others if we were previously in a linkup state
9582 */
9583 ppd->host_link_state = HLS_DN_OFFLINE;
9584 if (previous_state & HLS_UP) {
9585 /* went down while link was up */
9586 handle_linkup_change(dd, 0);
9587 } else if (previous_state
9588 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9589 /* went down while attempting link up */
9590 /* byte 1 of last_*_state is the failure reason */
9591 read_last_local_state(dd, &last_local_state);
9592 read_last_remote_state(dd, &last_remote_state);
9593 dd_dev_err(dd,
9594 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9595 last_local_state, last_remote_state);
9596 }
9597
9598 /* the active link width (downgrade) is 0 on link down */
9599 ppd->link_width_active = 0;
9600 ppd->link_width_downgrade_tx_active = 0;
9601 ppd->link_width_downgrade_rx_active = 0;
9602 ppd->current_egress_rate = 0;
9603 return 0;
9604}
9605
9606/* return the link state name */
9607static const char *link_state_name(u32 state)
9608{
9609 const char *name;
9610 int n = ilog2(state);
9611 static const char * const names[] = {
9612 [__HLS_UP_INIT_BP] = "INIT",
9613 [__HLS_UP_ARMED_BP] = "ARMED",
9614 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9615 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9616 [__HLS_DN_POLL_BP] = "POLL",
9617 [__HLS_DN_DISABLE_BP] = "DISABLE",
9618 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9619 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9620 [__HLS_GOING_UP_BP] = "GOING_UP",
9621 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9622 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9623 };
9624
9625 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9626 return name ? name : "unknown";
9627}
9628
9629/* return the link state reason name */
9630static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9631{
9632 if (state == HLS_UP_INIT) {
9633 switch (ppd->linkinit_reason) {
9634 case OPA_LINKINIT_REASON_LINKUP:
9635 return "(LINKUP)";
9636 case OPA_LINKINIT_REASON_FLAPPING:
9637 return "(FLAPPING)";
9638 case OPA_LINKINIT_OUTSIDE_POLICY:
9639 return "(OUTSIDE_POLICY)";
9640 case OPA_LINKINIT_QUARANTINED:
9641 return "(QUARANTINED)";
9642 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9643 return "(INSUFIC_CAPABILITY)";
9644 default:
9645 break;
9646 }
9647 }
9648 return "";
9649}
9650
9651/*
9652 * driver_physical_state - convert the driver's notion of a port's
9653 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9654 * Return -1 (converted to a u32) to indicate error.
9655 */
9656u32 driver_physical_state(struct hfi1_pportdata *ppd)
9657{
9658 switch (ppd->host_link_state) {
9659 case HLS_UP_INIT:
9660 case HLS_UP_ARMED:
9661 case HLS_UP_ACTIVE:
9662 return IB_PORTPHYSSTATE_LINKUP;
9663 case HLS_DN_POLL:
9664 return IB_PORTPHYSSTATE_POLLING;
9665 case HLS_DN_DISABLE:
9666 return IB_PORTPHYSSTATE_DISABLED;
9667 case HLS_DN_OFFLINE:
9668 return OPA_PORTPHYSSTATE_OFFLINE;
9669 case HLS_VERIFY_CAP:
9670 return IB_PORTPHYSSTATE_POLLING;
9671 case HLS_GOING_UP:
9672 return IB_PORTPHYSSTATE_POLLING;
9673 case HLS_GOING_OFFLINE:
9674 return OPA_PORTPHYSSTATE_OFFLINE;
9675 case HLS_LINK_COOLDOWN:
9676 return OPA_PORTPHYSSTATE_OFFLINE;
9677 case HLS_DN_DOWNDEF:
9678 default:
9679 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9680 ppd->host_link_state);
9681 return -1;
9682 }
9683}
9684
9685/*
9686 * driver_logical_state - convert the driver's notion of a port's
9687 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9688 * (converted to a u32) to indicate error.
9689 */
9690u32 driver_logical_state(struct hfi1_pportdata *ppd)
9691{
9692 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9693 return IB_PORT_DOWN;
9694
9695 switch (ppd->host_link_state & HLS_UP) {
9696 case HLS_UP_INIT:
9697 return IB_PORT_INIT;
9698 case HLS_UP_ARMED:
9699 return IB_PORT_ARMED;
9700 case HLS_UP_ACTIVE:
9701 return IB_PORT_ACTIVE;
9702 default:
9703 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9704 ppd->host_link_state);
9705 return -1;
9706 }
9707}
9708
9709void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9710 u8 neigh_reason, u8 rem_reason)
9711{
9712 if (ppd->local_link_down_reason.latest == 0 &&
9713 ppd->neigh_link_down_reason.latest == 0) {
9714 ppd->local_link_down_reason.latest = lcl_reason;
9715 ppd->neigh_link_down_reason.latest = neigh_reason;
9716 ppd->remote_link_down_reason = rem_reason;
9717 }
9718}
9719
9720/*
9721 * Change the physical and/or logical link state.
9722 *
9723 * Do not call this routine while inside an interrupt. It contains
9724 * calls to routines that can take multiple seconds to finish.
9725 *
9726 * Returns 0 on success, -errno on failure.
9727 */
9728int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9729{
9730 struct hfi1_devdata *dd = ppd->dd;
9731 struct ib_event event = {.device = NULL};
9732 int ret1, ret = 0;
9733 int was_up, is_down;
9734 int orig_new_state, poll_bounce;
9735
9736 mutex_lock(&ppd->hls_lock);
9737
9738 orig_new_state = state;
9739 if (state == HLS_DN_DOWNDEF)
9740 state = dd->link_default;
9741
9742 /* interpret poll -> poll as a link bounce */
9743 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9744 && state == HLS_DN_POLL;
9745
9746 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9747 link_state_name(ppd->host_link_state),
9748 link_state_name(orig_new_state),
9749 poll_bounce ? "(bounce) " : "",
9750 link_state_reason_name(ppd, state));
9751
9752 was_up = !!(ppd->host_link_state & HLS_UP);
9753
9754 /*
9755 * If we're going to a (HLS_*) link state that implies the logical
9756 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9757 * reset is_sm_config_started to 0.
9758 */
9759 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9760 ppd->is_sm_config_started = 0;
9761
9762 /*
9763 * Do nothing if the states match. Let a poll to poll link bounce
9764 * go through.
9765 */
9766 if (ppd->host_link_state == state && !poll_bounce)
9767 goto done;
9768
9769 switch (state) {
9770 case HLS_UP_INIT:
9771 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9772 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9773 /*
9774 * Quick link up jumps from polling to here.
9775 *
9776 * Whether in normal or loopback mode, the
9777 * simulator jumps from polling to link up.
9778 * Accept that here.
9779 */
9780 /* OK */;
9781 } else if (ppd->host_link_state != HLS_GOING_UP) {
9782 goto unexpected;
9783 }
9784
9785 ppd->host_link_state = HLS_UP_INIT;
9786 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9787 if (ret) {
9788 /* logical state didn't change, stay at going_up */
9789 ppd->host_link_state = HLS_GOING_UP;
9790 dd_dev_err(dd,
9791 "%s: logical state did not change to INIT\n",
9792 __func__);
9793 } else {
9794 /* clear old transient LINKINIT_REASON code */
9795 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9796 ppd->linkinit_reason =
9797 OPA_LINKINIT_REASON_LINKUP;
9798
9799 /* enable the port */
9800 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9801
9802 handle_linkup_change(dd, 1);
9803 }
9804 break;
9805 case HLS_UP_ARMED:
9806 if (ppd->host_link_state != HLS_UP_INIT)
9807 goto unexpected;
9808
9809 ppd->host_link_state = HLS_UP_ARMED;
9810 set_logical_state(dd, LSTATE_ARMED);
9811 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9812 if (ret) {
9813 /* logical state didn't change, stay at init */
9814 ppd->host_link_state = HLS_UP_INIT;
9815 dd_dev_err(dd,
9816 "%s: logical state did not change to ARMED\n",
9817 __func__);
9818 }
9819 /*
9820 * The simulator does not currently implement SMA messages,
9821 * so neighbor_normal is not set. Set it here when we first
9822 * move to Armed.
9823 */
9824 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9825 ppd->neighbor_normal = 1;
9826 break;
9827 case HLS_UP_ACTIVE:
9828 if (ppd->host_link_state != HLS_UP_ARMED)
9829 goto unexpected;
9830
9831 ppd->host_link_state = HLS_UP_ACTIVE;
9832 set_logical_state(dd, LSTATE_ACTIVE);
9833 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9834 if (ret) {
9835 /* logical state didn't change, stay at armed */
9836 ppd->host_link_state = HLS_UP_ARMED;
9837 dd_dev_err(dd,
9838 "%s: logical state did not change to ACTIVE\n",
9839 __func__);
9840 } else {
9841
9842 /* tell all engines to go running */
9843 sdma_all_running(dd);
9844
 9845			/* Signal the IB layer that the port has gone active */
9846 event.device = &dd->verbs_dev.ibdev;
9847 event.element.port_num = ppd->port;
9848 event.event = IB_EVENT_PORT_ACTIVE;
9849 }
9850 break;
9851 case HLS_DN_POLL:
9852 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9853 ppd->host_link_state == HLS_DN_OFFLINE) &&
9854 dd->dc_shutdown)
9855 dc_start(dd);
9856 /* Hand LED control to the DC */
9857 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9858
9859 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9860 u8 tmp = ppd->link_enabled;
9861
9862 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9863 if (ret) {
9864 ppd->link_enabled = tmp;
9865 break;
9866 }
9867 ppd->remote_link_down_reason = 0;
9868
9869 if (ppd->driver_link_ready)
9870 ppd->link_enabled = 1;
9871 }
9872
9873 ret = set_local_link_attributes(ppd);
9874 if (ret)
9875 break;
9876
9877 ppd->port_error_action = 0;
9878 ppd->host_link_state = HLS_DN_POLL;
9879
9880 if (quick_linkup) {
9881 /* quick linkup does not go into polling */
9882 ret = do_quick_linkup(dd);
9883 } else {
9884 ret1 = set_physical_link_state(dd, PLS_POLLING);
9885 if (ret1 != HCMD_SUCCESS) {
9886 dd_dev_err(dd,
9887 "Failed to transition to Polling link state, return 0x%x\n",
9888 ret1);
9889 ret = -EINVAL;
9890 }
9891 }
9892 ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
9893 /*
9894 * If an error occurred above, go back to offline. The
9895 * caller may reschedule another attempt.
9896 */
9897 if (ret)
9898 goto_offline(ppd, 0);
9899 break;
9900 case HLS_DN_DISABLE:
9901 /* link is disabled */
9902 ppd->link_enabled = 0;
9903
9904 /* allow any state to transition to disabled */
9905
9906 /* must transition to offline first */
9907 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9908 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9909 if (ret)
9910 break;
9911 ppd->remote_link_down_reason = 0;
9912 }
9913
9914 ret1 = set_physical_link_state(dd, PLS_DISABLED);
9915 if (ret1 != HCMD_SUCCESS) {
9916 dd_dev_err(dd,
9917 "Failed to transition to Disabled link state, return 0x%x\n",
9918 ret1);
9919 ret = -EINVAL;
9920 break;
9921 }
9922 ppd->host_link_state = HLS_DN_DISABLE;
9923 dc_shutdown(dd);
9924 break;
9925 case HLS_DN_OFFLINE:
9926 if (ppd->host_link_state == HLS_DN_DISABLE)
9927 dc_start(dd);
9928
9929 /* allow any state to transition to offline */
9930 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9931 if (!ret)
9932 ppd->remote_link_down_reason = 0;
9933 break;
9934 case HLS_VERIFY_CAP:
9935 if (ppd->host_link_state != HLS_DN_POLL)
9936 goto unexpected;
9937 ppd->host_link_state = HLS_VERIFY_CAP;
9938 break;
9939 case HLS_GOING_UP:
9940 if (ppd->host_link_state != HLS_VERIFY_CAP)
9941 goto unexpected;
9942
9943 ret1 = set_physical_link_state(dd, PLS_LINKUP);
9944 if (ret1 != HCMD_SUCCESS) {
9945 dd_dev_err(dd,
9946 "Failed to transition to link up state, return 0x%x\n",
9947 ret1);
9948 ret = -EINVAL;
9949 break;
9950 }
9951 ppd->host_link_state = HLS_GOING_UP;
9952 break;
9953
9954 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
9955 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
9956 default:
9957 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
9958 __func__, state);
9959 ret = -EINVAL;
9960 break;
9961 }
9962
9963 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
9964 HLS_DN_DISABLE | HLS_DN_OFFLINE));
9965
9966 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
9967 ppd->neigh_link_down_reason.sma == 0) {
9968 ppd->local_link_down_reason.sma =
9969 ppd->local_link_down_reason.latest;
9970 ppd->neigh_link_down_reason.sma =
9971 ppd->neigh_link_down_reason.latest;
9972 }
9973
9974 goto done;
9975
9976unexpected:
9977 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
9978 __func__, link_state_name(ppd->host_link_state),
9979 link_state_name(state));
9980 ret = -EINVAL;
9981
9982done:
9983 mutex_unlock(&ppd->hls_lock);
9984
9985 if (event.device)
9986 ib_dispatch_event(&event);
9987
9988 return ret;
9989}
9990
9991int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
9992{
9993 u64 reg;
9994 int ret = 0;
9995
9996 switch (which) {
9997 case HFI1_IB_CFG_LIDLMC:
9998 set_lidlmc(ppd);
9999 break;
10000 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10001 /*
10002 * The VL Arbitrator high limit is sent in units of 4k
10003 * bytes, while HFI stores it in units of 64 bytes.
10004 */
10005 val *= 4096/64;
10006 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10007 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10008 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10009 break;
10010 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10011 /* HFI only supports POLL as the default link down state */
10012 if (val != HLS_DN_POLL)
10013 ret = -EINVAL;
10014 break;
10015 case HFI1_IB_CFG_OP_VLS:
10016 if (ppd->vls_operational != val) {
10017 ppd->vls_operational = val;
10018 if (!ppd->port)
10019 ret = -EINVAL;
10020 else
10021 ret = sdma_map_init(
10022 ppd->dd,
10023 ppd->port - 1,
10024 val,
10025 NULL);
10026 }
10027 break;
10028 /*
10029 * For link width, link width downgrade, and speed enable, always AND
10030 * the setting with what is actually supported. This has two benefits.
10031 * First, enabled can't have unsupported values, no matter what the
10032 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10033 * "fill in with your supported value" have all the bits in the
10034 * field set, so simply ANDing with supported has the desired result.
10035 */
10036 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10037 ppd->link_width_enabled = val & ppd->link_width_supported;
10038 break;
10039 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10040 ppd->link_width_downgrade_enabled =
10041 val & ppd->link_width_downgrade_supported;
10042 break;
10043 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10044 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10045 break;
10046 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10047 /*
10048 * HFI does not follow IB specs, save this value
10049 * so we can report it, if asked.
10050 */
10051 ppd->overrun_threshold = val;
10052 break;
10053 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10054 /*
10055 * HFI does not follow IB specs, save this value
10056 * so we can report it, if asked.
10057 */
10058 ppd->phy_error_threshold = val;
10059 break;
10060
10061 case HFI1_IB_CFG_MTU:
10062 set_send_length(ppd);
10063 break;
10064
10065 case HFI1_IB_CFG_PKEYS:
10066 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10067 set_partition_keys(ppd);
10068 break;
10069
10070 default:
10071 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10072 dd_dev_info(ppd->dd,
10073 "%s: which %s, val 0x%x: not implemented\n",
10074 __func__, ib_cfg_name(which), val);
10075 break;
10076 }
10077 return ret;
10078}
10079
10080/* begin functions related to vl arbitration table caching */
10081static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10082{
10083 int i;
10084
10085 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10086 VL_ARB_LOW_PRIO_TABLE_SIZE);
10087 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10088 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10089
10090 /*
10091 * Note that we always return values directly from the
10092 * 'vl_arb_cache' (and do no CSR reads) in response to a
10093 * 'Get(VLArbTable)'. This is obviously correct after a
10094 * 'Set(VLArbTable)', since the cache will then be up to
10095 * date. But it's also correct prior to any 'Set(VLArbTable)'
10096 * since then both the cache and the relevant h/w registers
10097 * will be zeroed.
10098 */
10099
10100 for (i = 0; i < MAX_PRIO_TABLE; i++)
10101 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10102}
10103
10104/*
10105 * vl_arb_lock_cache
10106 *
10107 * All other vl_arb_* functions should be called only after locking
10108 * the cache.
10109 */
10110static inline struct vl_arb_cache *
10111vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10112{
10113 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10114 return NULL;
10115 spin_lock(&ppd->vl_arb_cache[idx].lock);
10116 return &ppd->vl_arb_cache[idx];
10117}
10118
10119static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10120{
10121 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10122}
10123
10124static void vl_arb_get_cache(struct vl_arb_cache *cache,
10125 struct ib_vl_weight_elem *vl)
10126{
10127 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10128}
10129
10130static void vl_arb_set_cache(struct vl_arb_cache *cache,
10131 struct ib_vl_weight_elem *vl)
10132{
10133 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10134}
10135
10136static int vl_arb_match_cache(struct vl_arb_cache *cache,
10137 struct ib_vl_weight_elem *vl)
10138{
10139 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10140}
10141/* end functions related to vl arbitration table caching */
10142
10143static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10144 u32 size, struct ib_vl_weight_elem *vl)
10145{
10146 struct hfi1_devdata *dd = ppd->dd;
10147 u64 reg;
10148 unsigned int i, is_up = 0;
10149 int drain, ret = 0;
10150
10151 mutex_lock(&ppd->hls_lock);
10152
10153 if (ppd->host_link_state & HLS_UP)
10154 is_up = 1;
10155
10156 drain = !is_ax(dd) && is_up;
10157
10158 if (drain)
10159 /*
10160 * Before adjusting VL arbitration weights, empty per-VL
10161 * FIFOs, otherwise a packet whose VL weight is being
10162 * set to 0 could get stuck in a FIFO with no chance to
10163 * egress.
10164 */
10165 ret = stop_drain_data_vls(dd);
10166
10167 if (ret) {
10168 dd_dev_err(
10169 dd,
10170 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10171 __func__);
10172 goto err;
10173 }
10174
10175 for (i = 0; i < size; i++, vl++) {
10176 /*
10177 * NOTE: The low priority shift and mask are used here, but
10178 * they are the same for both the low and high registers.
10179 */
10180 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10181 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10182 | (((u64)vl->weight
10183 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10184 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10185 write_csr(dd, target + (i * 8), reg);
10186 }
10187 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10188
10189 if (drain)
10190 open_fill_data_vls(dd); /* reopen all VLs */
10191
10192err:
10193 mutex_unlock(&ppd->hls_lock);
10194
10195 return ret;
10196}
10197
10198/*
10199 * Read one credit merge VL register.
10200 */
10201static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10202 struct vl_limit *vll)
10203{
10204 u64 reg = read_csr(dd, csr);
10205
10206 vll->dedicated = cpu_to_be16(
10207 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10208 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10209 vll->shared = cpu_to_be16(
10210 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10211 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10212}
10213
10214/*
10215 * Read the current credit merge limits.
10216 */
10217static int get_buffer_control(struct hfi1_devdata *dd,
10218 struct buffer_control *bc, u16 *overall_limit)
10219{
10220 u64 reg;
10221 int i;
10222
10223 /* not all entries are filled in */
10224 memset(bc, 0, sizeof(*bc));
10225
10226 /* OPA and HFI have a 1-1 mapping */
10227 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10228 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10229
10230 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10231 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10232
10233 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10234 bc->overall_shared_limit = cpu_to_be16(
10235 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10236 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10237 if (overall_limit)
10238 *overall_limit = (reg
10239 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10240 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10241 return sizeof(struct buffer_control);
10242}
10243
10244static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10245{
10246 u64 reg;
10247 int i;
10248
10249 /* each register contains 16 SC->VLnt mappings, 4 bits each */
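	/*
	 * Worked example (illustrative register value, not read from
	 * hardware): on a little-endian host, if byte 0 of the register
	 * holds 0x21, the loop below stores dp->vlnt[0] = 0x1 (low nibble)
	 * and dp->vlnt[1] = 0x2 (high nibble), i.e. SC0 -> VLnt 1 and
	 * SC1 -> VLnt 2.
	 */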
10250 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10251 for (i = 0; i < sizeof(u64); i++) {
10252 u8 byte = *(((u8 *)&reg) + i);
10253
10254 dp->vlnt[2 * i] = byte & 0xf;
10255 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10256 }
10257
10258 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10259 for (i = 0; i < sizeof(u64); i++) {
10260 u8 byte = *(((u8 *)&reg) + i);
10261
10262 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10263 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10264 }
10265 return sizeof(struct sc2vlnt);
10266}
10267
10268static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10269 struct ib_vl_weight_elem *vl)
10270{
10271 unsigned int i;
10272
10273 for (i = 0; i < nelems; i++, vl++) {
10274 vl->vl = 0xf;
10275 vl->weight = 0;
10276 }
10277}
10278
10279static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10280{
10281 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10282 DC_SC_VL_VAL(15_0,
10283 0, dp->vlnt[0] & 0xf,
10284 1, dp->vlnt[1] & 0xf,
10285 2, dp->vlnt[2] & 0xf,
10286 3, dp->vlnt[3] & 0xf,
10287 4, dp->vlnt[4] & 0xf,
10288 5, dp->vlnt[5] & 0xf,
10289 6, dp->vlnt[6] & 0xf,
10290 7, dp->vlnt[7] & 0xf,
10291 8, dp->vlnt[8] & 0xf,
10292 9, dp->vlnt[9] & 0xf,
10293 10, dp->vlnt[10] & 0xf,
10294 11, dp->vlnt[11] & 0xf,
10295 12, dp->vlnt[12] & 0xf,
10296 13, dp->vlnt[13] & 0xf,
10297 14, dp->vlnt[14] & 0xf,
10298 15, dp->vlnt[15] & 0xf));
10299 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10300 DC_SC_VL_VAL(31_16,
10301 16, dp->vlnt[16] & 0xf,
10302 17, dp->vlnt[17] & 0xf,
10303 18, dp->vlnt[18] & 0xf,
10304 19, dp->vlnt[19] & 0xf,
10305 20, dp->vlnt[20] & 0xf,
10306 21, dp->vlnt[21] & 0xf,
10307 22, dp->vlnt[22] & 0xf,
10308 23, dp->vlnt[23] & 0xf,
10309 24, dp->vlnt[24] & 0xf,
10310 25, dp->vlnt[25] & 0xf,
10311 26, dp->vlnt[26] & 0xf,
10312 27, dp->vlnt[27] & 0xf,
10313 28, dp->vlnt[28] & 0xf,
10314 29, dp->vlnt[29] & 0xf,
10315 30, dp->vlnt[30] & 0xf,
10316 31, dp->vlnt[31] & 0xf));
10317}
10318
10319static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10320 u16 limit)
10321{
10322 if (limit != 0)
10323 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10324 what, (int)limit, idx);
10325}
10326
10327/* change only the shared limit portion of SendCmGlobalCredit */
10328static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10329{
10330 u64 reg;
10331
10332 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10333 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10334 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10335 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10336}
10337
10338/* change only the total credit limit portion of SendCmGlobalCredit */
10339static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10340{
10341 u64 reg;
10342
10343 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10344 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10345 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10346 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10347}
10348
10349/* set the given per-VL shared limit */
10350static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10351{
10352 u64 reg;
10353 u32 addr;
10354
10355 if (vl < TXE_NUM_DATA_VL)
10356 addr = SEND_CM_CREDIT_VL + (8 * vl);
10357 else
10358 addr = SEND_CM_CREDIT_VL15;
10359
10360 reg = read_csr(dd, addr);
10361 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10362 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10363 write_csr(dd, addr, reg);
10364}
10365
10366/* set the given per-VL dedicated limit */
10367static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10368{
10369 u64 reg;
10370 u32 addr;
10371
10372 if (vl < TXE_NUM_DATA_VL)
10373 addr = SEND_CM_CREDIT_VL + (8 * vl);
10374 else
10375 addr = SEND_CM_CREDIT_VL15;
10376
10377 reg = read_csr(dd, addr);
10378 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10379 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10380 write_csr(dd, addr, reg);
10381}
10382
10383/* spin until the given per-VL status mask bits clear */
10384static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10385 const char *which)
10386{
10387 unsigned long timeout;
10388 u64 reg;
10389
10390 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10391 while (1) {
10392 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10393
10394 if (reg == 0)
10395 return; /* success */
10396 if (time_after(jiffies, timeout))
10397 break; /* timed out */
10398 udelay(1);
10399 }
10400
10401 dd_dev_err(dd,
10402 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10403 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10404 /*
10405 * If this occurs, it is likely there was a credit loss on the link.
10406 * The only recovery from that is a link bounce.
10407 */
10408 dd_dev_err(dd,
10409 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10410}
10411
10412/*
10413 * The number of credits on the VLs may be changed while everything
10414 * is "live", but the following algorithm must be followed due to
10415 * how the hardware is actually implemented. In particular,
10416 * Return_Credit_Status[] is the only correct status check.
10417 *
10418 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10419 * set Global_Shared_Credit_Limit = 0
10420 * use_all_vl = 1
10421 * mask0 = all VLs that are changing either dedicated or shared limits
10422 * set Shared_Limit[mask0] = 0
10423 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10424 * if (changing any dedicated limit)
10425 * mask1 = all VLs that are lowering dedicated limits
10426 * lower Dedicated_Limit[mask1]
10427 * spin until Return_Credit_Status[mask1] == 0
10428 * raise Dedicated_Limits
10429 * raise Shared_Limits
10430 * raise Global_Shared_Credit_Limit
10431 *
10432 * lower = if the new limit is lower, set the limit to the new value
10433 * raise = if the new limit is higher than the current value (may be changed
10434 * earlier in the algorithm), set the new limit to the new value
10435 */
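/*
 * Worked example of the algorithm above (credit values are made up and
 * purely illustrative): suppose VL0's dedicated limit is lowered from
 * 100 to 60 and the global shared limit is lowered from 40 to 20.
 *   - The global shared limit is being reduced, so it is first set to 0
 *     and the status spin covers all usable VLs.
 *   - VL0 is changing, so Shared_Limit[VL0] is zeroed and we spin until
 *     Return_Credit_Status clears.
 *   - VL0's dedicated limit is being lowered, so it is written to 60 and
 *     we spin on Return_Credit_Status[VL0] again.
 *   - Nothing needs raising for the dedicated or per-VL shared limits;
 *     the global shared limit is then raised from 0 to its new value of
 *     20, and the total limit is adjusted last because the new total is
 *     lower than the old one.
 */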
10436static int set_buffer_control(struct hfi1_devdata *dd,
10437 struct buffer_control *new_bc)
10438{
10439 u64 changing_mask, ld_mask, stat_mask;
10440 int change_count;
10441 int i, use_all_mask;
10442 int this_shared_changing;
10443 /*
10444 * A0: the variable any_shared_limit_changing below augments the
10445 * algorithm above; it can be removed when A0 support is removed.
10446 */
10447 int any_shared_limit_changing;
10448 struct buffer_control cur_bc;
10449 u8 changing[OPA_MAX_VLS];
10450 u8 lowering_dedicated[OPA_MAX_VLS];
10451 u16 cur_total;
10452 u32 new_total = 0;
10453 const u64 all_mask =
10454 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10455 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10456 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10457 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10458 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10459 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10460 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10461 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10462 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10463
10464#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10465#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10466
10467
10468 /* find the new total credits, do sanity check on unused VLs */
10469 for (i = 0; i < OPA_MAX_VLS; i++) {
10470 if (valid_vl(i)) {
10471 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10472 continue;
10473 }
10474 nonzero_msg(dd, i, "dedicated",
10475 be16_to_cpu(new_bc->vl[i].dedicated));
10476 nonzero_msg(dd, i, "shared",
10477 be16_to_cpu(new_bc->vl[i].shared));
10478 new_bc->vl[i].dedicated = 0;
10479 new_bc->vl[i].shared = 0;
10480 }
10481 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10482 if (new_total > (u32)dd->link_credits)
10483 return -EINVAL;
10484 /* fetch the current values */
10485 get_buffer_control(dd, &cur_bc, &cur_total);
10486
10487 /*
10488 * Create the masks we will use.
10489 */
10490 memset(changing, 0, sizeof(changing));
10491 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10492 /* NOTE: Assumes that the individual VL bits are adjacent and in
10493 increasing order */
10494 stat_mask =
10495 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10496 changing_mask = 0;
10497 ld_mask = 0;
10498 change_count = 0;
10499 any_shared_limit_changing = 0;
10500 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10501 if (!valid_vl(i))
10502 continue;
10503 this_shared_changing = new_bc->vl[i].shared
10504 != cur_bc.vl[i].shared;
10505 if (this_shared_changing)
10506 any_shared_limit_changing = 1;
10507 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10508 || this_shared_changing) {
10509 changing[i] = 1;
10510 changing_mask |= stat_mask;
10511 change_count++;
10512 }
10513 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10514 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10515 lowering_dedicated[i] = 1;
10516 ld_mask |= stat_mask;
10517 }
10518 }
10519
10520 /* bracket the credit change with a total adjustment */
10521 if (new_total > cur_total)
10522 set_global_limit(dd, new_total);
10523
10524 /*
10525 * Start the credit change algorithm.
10526 */
10527 use_all_mask = 0;
10528 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10529 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10530 (is_ax(dd) && any_shared_limit_changing)) {
10531 set_global_shared(dd, 0);
10532 cur_bc.overall_shared_limit = 0;
10533 use_all_mask = 1;
10534 }
10535
10536 for (i = 0; i < NUM_USABLE_VLS; i++) {
10537 if (!valid_vl(i))
10538 continue;
10539
10540 if (changing[i]) {
10541 set_vl_shared(dd, i, 0);
10542 cur_bc.vl[i].shared = 0;
10543 }
10544 }
10545
10546 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10547 "shared");
10548
10549 if (change_count > 0) {
10550 for (i = 0; i < NUM_USABLE_VLS; i++) {
10551 if (!valid_vl(i))
10552 continue;
10553
10554 if (lowering_dedicated[i]) {
10555 set_vl_dedicated(dd, i,
10556 be16_to_cpu(new_bc->vl[i].dedicated));
10557 cur_bc.vl[i].dedicated =
10558 new_bc->vl[i].dedicated;
10559 }
10560 }
10561
10562 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10563
10564 /* now raise all dedicated that are going up */
10565 for (i = 0; i < NUM_USABLE_VLS; i++) {
10566 if (!valid_vl(i))
10567 continue;
10568
10569 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10570 be16_to_cpu(cur_bc.vl[i].dedicated))
10571 set_vl_dedicated(dd, i,
10572 be16_to_cpu(new_bc->vl[i].dedicated));
10573 }
10574 }
10575
10576 /* next raise all shared that are going up */
10577 for (i = 0; i < NUM_USABLE_VLS; i++) {
10578 if (!valid_vl(i))
10579 continue;
10580
10581 if (be16_to_cpu(new_bc->vl[i].shared) >
10582 be16_to_cpu(cur_bc.vl[i].shared))
10583 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10584 }
10585
10586 /* finally raise the global shared */
10587 if (be16_to_cpu(new_bc->overall_shared_limit) >
10588 be16_to_cpu(cur_bc.overall_shared_limit))
10589 set_global_shared(dd,
10590 be16_to_cpu(new_bc->overall_shared_limit));
10591
10592 /* bracket the credit change with a total adjustment */
10593 if (new_total < cur_total)
10594 set_global_limit(dd, new_total);
10595 return 0;
10596}
10597
10598/*
10599 * Read the given fabric manager table. Return the size of the
10600 * table (in bytes) on success, and a negative error code on
10601 * failure.
10602 */
10603int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10605{
10606 int size;
10607 struct vl_arb_cache *vlc;
10608
10609 switch (which) {
10610 case FM_TBL_VL_HIGH_ARB:
10611 size = 256;
10612 /*
10613 * OPA specifies 128 elements (of 2 bytes each), though
10614 * HFI supports only 16 elements in h/w.
10615 */
10616 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10617 vl_arb_get_cache(vlc, t);
10618 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10619 break;
10620 case FM_TBL_VL_LOW_ARB:
10621 size = 256;
10622 /*
10623 * OPA specifies 128 elements (of 2 bytes each), though
10624 * HFI supports only 16 elements in h/w.
10625 */
10626 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10627 vl_arb_get_cache(vlc, t);
10628 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10629 break;
10630 case FM_TBL_BUFFER_CONTROL:
10631 size = get_buffer_control(ppd->dd, t, NULL);
10632 break;
10633 case FM_TBL_SC2VLNT:
10634 size = get_sc2vlnt(ppd->dd, t);
10635 break;
10636 case FM_TBL_VL_PREEMPT_ELEMS:
10637 size = 256;
10638 /* OPA specifies 128 elements, of 2 bytes each */
10639 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10640 break;
10641 case FM_TBL_VL_PREEMPT_MATRIX:
10642 size = 256;
10643 /*
10644 * OPA specifies that this is the same size as the VL
10645 * arbitration tables (i.e., 256 bytes).
10646 */
10647 break;
10648 default:
10649 return -EINVAL;
10650 }
10651 return size;
10652}
10653
10654/*
10655 * Write the given fabric manager table.
10656 */
10657int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10658{
10659 int ret = 0;
10660 struct vl_arb_cache *vlc;
10661
10662 switch (which) {
10663 case FM_TBL_VL_HIGH_ARB:
10664 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10665 if (vl_arb_match_cache(vlc, t)) {
10666 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10667 break;
10668 }
10669 vl_arb_set_cache(vlc, t);
10670 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10671 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10672 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10673 break;
10674 case FM_TBL_VL_LOW_ARB:
10675 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10676 if (vl_arb_match_cache(vlc, t)) {
10677 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10678 break;
10679 }
10680 vl_arb_set_cache(vlc, t);
10681 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10682 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10683 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10684 break;
10685 case FM_TBL_BUFFER_CONTROL:
10686 ret = set_buffer_control(ppd->dd, t);
10687 break;
10688 case FM_TBL_SC2VLNT:
10689 set_sc2vlnt(ppd->dd, t);
10690 break;
10691 default:
10692 ret = -EINVAL;
10693 }
10694 return ret;
10695}
10696
10697/*
10698 * Disable all data VLs.
10699 *
10700 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10701 */
10702static int disable_data_vls(struct hfi1_devdata *dd)
10703{
10704 if (is_ax(dd))
10705 return 1;
10706
10707 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10708
10709 return 0;
10710}
10711
10712/*
10713 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10714 * Just re-enables all data VLs (the "fill" part happens
10715 * automatically - the name was chosen for symmetry with
10716 * stop_drain_data_vls()).
10717 *
10718 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10719 */
10720int open_fill_data_vls(struct hfi1_devdata *dd)
10721{
10722 if (is_ax(dd))
10723 return 1;
10724
10725 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10726
10727 return 0;
10728}
10729
10730/*
10731 * drain_data_vls() - assumes that disable_data_vls() has been called;
10732 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
10733 * SDMA engines to drop to 0.
10734 */
10735static void drain_data_vls(struct hfi1_devdata *dd)
10736{
10737 sc_wait(dd);
10738 sdma_wait(dd);
10739 pause_for_credit_return(dd);
10740}
10741
10742/*
10743 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10744 *
10745 * Use open_fill_data_vls() to resume using data VLs. This pair is
10746 * meant to be used like this:
10747 *
10748 * stop_drain_data_vls(dd);
10749 * // do things with per-VL resources
10750 * open_fill_data_vls(dd);
10751 */
10752int stop_drain_data_vls(struct hfi1_devdata *dd)
10753{
10754 int ret;
10755
10756 ret = disable_data_vls(dd);
10757 if (ret == 0)
10758 drain_data_vls(dd);
10759
10760 return ret;
10761}
10762
10763/*
10764 * Convert a nanosecond time to a cclock count. No matter how slow
10765 * the cclock, a non-zero ns will always have a non-zero result.
10766 */
10767u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10768{
10769 u32 cclocks;
10770
10771 if (dd->icode == ICODE_FPGA_EMULATION)
10772 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10773 else /* simulation pretends to be ASIC */
10774 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10775 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10776 cclocks = 1;
10777 return cclocks;
10778}
10779
10780/*
10781 * Convert a cclock count to nanoseconds. No matter how slow
10782 * the cclock, a non-zero cclock count will always have a non-zero result.
10783 */
10784u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10785{
10786 u32 ns;
10787
10788 if (dd->icode == ICODE_FPGA_EMULATION)
10789 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10790 else /* simulation pretends to be ASIC */
10791 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10792 if (cclocks && !ns)
10793 ns = 1;
10794 return ns;
10795}
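/*
 * Example of the conversions above (the cclock period used here is
 * hypothetical, only to show the rounding behavior): with a 1000 ps
 * cclock period, ns_to_cclock(dd, 5) yields (5 * 1000) / 1000 = 5
 * cclocks and cclock_to_ns(dd, 5) maps back to 5 ns. With a very slow
 * clock, a result that would truncate to 0 is forced up to 1 so callers
 * never see a zero count or time.
 */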
10796
10797/*
10798 * Dynamically adjust the receive interrupt timeout for a context based on
10799 * incoming packet rate.
10800 *
10801 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10802 */
10803static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10804{
10805 struct hfi1_devdata *dd = rcd->dd;
10806 u32 timeout = rcd->rcvavail_timeout;
10807
10808 /*
10809 * This algorithm doubles or halves the timeout depending on whether
10810 * the number of packets received in this interrupt was less than or
10811 * greater than or equal to the interrupt count.
10812 *
10813 * The calculations below do not allow a steady state to be achieved.
10814 * Only at the endpoints is it possible to have an unchanging
10815 * timeout.
10816 */
10817 if (npkts < rcv_intr_count) {
10818 /*
10819 * Not enough packets arrived before the timeout, adjust
10820 * timeout downward.
10821 */
10822 if (timeout < 2) /* already at minimum? */
10823 return;
10824 timeout >>= 1;
10825 } else {
10826 /*
10827 * More than enough packets arrived before the timeout, adjust
10828 * timeout upward.
10829 */
10830 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10831 return;
10832 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10833 }
10834
10835 rcd->rcvavail_timeout = timeout;
10836 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10837 been verified to be in range */
10838 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10839 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10840}
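/*
 * Example of the adjustment above (packet counts are illustrative): if
 * rcv_intr_count were 16, an interrupt that handled only 3 packets
 * halves the current timeout (never going below 1), while one that
 * handled 40 packets doubles it, capped at dd->rcv_intr_timeout_csr.
 */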
10841
10842void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10843 u32 intr_adjust, u32 npkts)
10844{
10845 struct hfi1_devdata *dd = rcd->dd;
10846 u64 reg;
10847 u32 ctxt = rcd->ctxt;
10848
10849 /*
10850 * Need to write timeout register before updating RcvHdrHead to ensure
10851 * that a new value is used when the HW decides to restart counting.
10852 */
10853 if (intr_adjust)
10854 adjust_rcv_timeout(rcd, npkts);
10855 if (updegr) {
10856 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10857 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10858 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10859 }
10860 mmiowb();
10861 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10862 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10863 << RCV_HDR_HEAD_HEAD_SHIFT);
10864 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10865 mmiowb();
10866}
10867
10868u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10869{
10870 u32 head, tail;
10871
10872 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10873 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10874
10875 if (rcd->rcvhdrtail_kvaddr)
10876 tail = get_rcvhdrtail(rcd);
10877 else
10878 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10879
10880 return head == tail;
10881}
10882
10883/*
10884 * Context Control and Receive Array encoding for buffer size:
10885 * 0x0 invalid
10886 * 0x1 4 KB
10887 * 0x2 8 KB
10888 * 0x3 16 KB
10889 * 0x4 32 KB
10890 * 0x5 64 KB
10891 * 0x6 128 KB
10892 * 0x7 256 KB
10893 * 0x8 512 KB (Receive Array only)
10894 * 0x9 1 MB (Receive Array only)
10895 * 0xa 2 MB (Receive Array only)
10896 *
10897 * 0xB-0xF - reserved (Receive Array only)
10898 *
10899 *
10900 * This routine assumes that the value has already been sanity checked.
10901 */
10902static u32 encoded_size(u32 size)
10903{
10904 switch (size) {
10905 case 4*1024: return 0x1;
10906 case 8*1024: return 0x2;
10907 case 16*1024: return 0x3;
10908 case 32*1024: return 0x4;
10909 case 64*1024: return 0x5;
10910 case 128*1024: return 0x6;
10911 case 256*1024: return 0x7;
10912 case 512*1024: return 0x8;
10913 case 1*1024*1024: return 0x9;
10914 case 2*1024*1024: return 0xa;
10915 }
10916 return 0x1; /* if invalid, go with the minimum size */
10917}
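/*
 * For example, encoded_size(64 * 1024) returns 0x5 per the table above,
 * and any size not listed falls back to 0x1 (the 4 KB encoding).
 */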
10918
10919void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
10920{
10921 struct hfi1_ctxtdata *rcd;
10922 u64 rcvctrl, reg;
10923 int did_enable = 0;
10924
10925 rcd = dd->rcd[ctxt];
10926 if (!rcd)
10927 return;
10928
10929 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
10930
10931 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
10932 /* if the context is already enabled, don't do the extra steps */
10933 if ((op & HFI1_RCVCTRL_CTXT_ENB)
10934 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
10935 /* reset the tail and hdr addresses, and sequence count */
10936 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
10937 rcd->rcvhdrq_phys);
10938 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
10939 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
10940 rcd->rcvhdrqtailaddr_phys);
10941 rcd->seq_cnt = 1;
10942
10943 /* reset the cached receive header queue head value */
10944 rcd->head = 0;
10945
10946 /*
10947 * Zero the receive header queue so we don't get false
10948 * positives when checking the sequence number. The
10949 * sequence numbers could land exactly on the same spot.
10950 * E.g. an rcd restart before the receive header queue wrapped.
10951 */
10952 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
10953
10954 /* starting timeout */
10955 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
10956
10957 /* enable the context */
10958 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
10959
10960 /* clean the egr buffer size first */
10961 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
10962 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
10963 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
10964 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
10965
10966 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
10967 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
10968 did_enable = 1;
10969
10970 /* zero RcvEgrIndexHead */
10971 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
10972
10973 /* set eager count and base index */
10974 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
10975 & RCV_EGR_CTRL_EGR_CNT_MASK)
10976 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
10977 (((rcd->eager_base >> RCV_SHIFT)
10978 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
10979 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
10980 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
10981
10982 /*
10983 * Set TID (expected) count and base index.
10984 * rcd->expected_count is set to individual RcvArray entries,
10985 * not pairs, and the CSR takes a pair-count in groups of
10986 * four, so divide by 8.
10987 */
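		/*
		 * E.g. an expected_count of 2048 individual RcvArray entries
		 * programs a pair-count field of 2048 / 8 = 256.
		 */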
10988 reg = (((rcd->expected_count >> RCV_SHIFT)
10989 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
10990 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
10991 (((rcd->expected_base >> RCV_SHIFT)
10992 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
10993 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
10994 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
10995 if (ctxt == HFI1_CTRL_CTXT)
10996 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
10997 }
10998 if (op & HFI1_RCVCTRL_CTXT_DIS) {
10999 write_csr(dd, RCV_VL15, 0);
11000 /*
11001 * When a receive context is being disabled, turn on the tail
11002 * update with a dummy tail address and then disable the
11003 * receive context.
11004 */
11005 if (dd->rcvhdrtail_dummy_physaddr) {
11006 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11007 dd->rcvhdrtail_dummy_physaddr);
11008 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11009 }
11010
11011 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11012 }
11013 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11014 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11015 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11016 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11017 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11018 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11019 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11020 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11021 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11022 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11023 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11024 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11025 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11026 /* In one-packet-per-eager mode, the size comes from
11027 the RcvArray entry. */
11028 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11029 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11030 }
11031 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11032 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11033 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11034 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11035 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11036 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11037 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11038 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11039 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11040 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11041 rcd->rcvctrl = rcvctrl;
11042 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11043 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11044
11045 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11046 if (did_enable
11047 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11048 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11049 if (reg != 0) {
11050 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11051 ctxt, reg);
11052 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11053 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11054 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11055 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11056 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11057 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11058 ctxt, reg, reg == 0 ? "not" : "still");
11059 }
11060 }
11061
11062 if (did_enable) {
11063 /*
11064 * The interrupt timeout and count must be set after
11065 * the context is enabled to take effect.
11066 */
11067 /* set interrupt timeout */
11068 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11069 (u64)rcd->rcvavail_timeout <<
11070 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11071
11072 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11073 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11074 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11075 }
11076
11077 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11078 /*
11079 * If the context has been disabled and the Tail Update has
11080 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11081 * so it doesn't contain an address that is invalid.
11082 */
11083 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11084 dd->rcvhdrtail_dummy_physaddr);
11085}
11086
11087u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11088 u64 **cntrp)
11089{
11090 int ret;
11091 u64 val = 0;
11092
11093 if (namep) {
11094 ret = dd->cntrnameslen;
11095 if (pos != 0) {
11096 dd_dev_err(dd, "read_cntrs does not support indexing");
11097 return 0;
11098 }
11099 *namep = dd->cntrnames;
11100 } else {
11101 const struct cntr_entry *entry;
11102 int i, j;
11103
11104 ret = (dd->ndevcntrs) * sizeof(u64);
11105 if (pos != 0) {
11106 dd_dev_err(dd, "read_cntrs does not support indexing");
11107 return 0;
11108 }
11109
11110 /* Get the start of the block of counters */
11111 *cntrp = dd->cntrs;
11112
11113 /*
11114 * Now go and fill in each counter in the block.
11115 */
11116 for (i = 0; i < DEV_CNTR_LAST; i++) {
11117 entry = &dev_cntrs[i];
11118 hfi1_cdbg(CNTR, "reading %s", entry->name);
11119 if (entry->flags & CNTR_DISABLED) {
11120 /* Nothing */
11121 hfi1_cdbg(CNTR, "\tDisabled\n");
11122 } else {
11123 if (entry->flags & CNTR_VL) {
11124 hfi1_cdbg(CNTR, "\tPer VL\n");
11125 for (j = 0; j < C_VL_COUNT; j++) {
11126 val = entry->rw_cntr(entry,
11127 dd, j,
11128 CNTR_MODE_R,
11129 0);
11130 hfi1_cdbg(
11131 CNTR,
11132 "\t\tRead 0x%llx for %d\n",
11133 val, j);
11134 dd->cntrs[entry->offset + j] =
11135 val;
11136 }
11137 } else {
11138 val = entry->rw_cntr(entry, dd,
11139 CNTR_INVALID_VL,
11140 CNTR_MODE_R, 0);
11141 dd->cntrs[entry->offset] = val;
11142 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11143 }
11144 }
11145 }
11146 }
11147 return ret;
11148}
11149
11150/*
11151 * Used by sysfs to create files for hfi stats to read
11152 */
11153u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11154 char **namep, u64 **cntrp)
11155{
11156 int ret;
11157 u64 val = 0;
11158
11159 if (namep) {
11160 ret = dd->portcntrnameslen;
11161 if (pos != 0) {
11162 dd_dev_err(dd, "index not supported");
11163 return 0;
11164 }
11165 *namep = dd->portcntrnames;
11166 } else {
11167 const struct cntr_entry *entry;
11168 struct hfi1_pportdata *ppd;
11169 int i, j;
11170
11171 ret = (dd->nportcntrs) * sizeof(u64);
11172 if (pos != 0) {
11173 dd_dev_err(dd, "indexing not supported");
11174 return 0;
11175 }
11176 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11177 *cntrp = ppd->cntrs;
11178
11179 for (i = 0; i < PORT_CNTR_LAST; i++) {
11180 entry = &port_cntrs[i];
11181 hfi1_cdbg(CNTR, "reading %s", entry->name);
11182 if (entry->flags & CNTR_DISABLED) {
11183 /* Nothing */
11184 hfi1_cdbg(CNTR, "\tDisabled\n");
11185 continue;
11186 }
11187
11188 if (entry->flags & CNTR_VL) {
11189 hfi1_cdbg(CNTR, "\tPer VL");
11190 for (j = 0; j < C_VL_COUNT; j++) {
11191 val = entry->rw_cntr(entry, ppd, j,
11192 CNTR_MODE_R,
11193 0);
11194 hfi1_cdbg(
11195 CNTR,
11196 "\t\tRead 0x%llx for %d",
11197 val, j);
11198 ppd->cntrs[entry->offset + j] = val;
11199 }
11200 } else {
11201 val = entry->rw_cntr(entry, ppd,
11202 CNTR_INVALID_VL,
11203 CNTR_MODE_R,
11204 0);
11205 ppd->cntrs[entry->offset] = val;
11206 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11207 }
11208 }
11209 }
11210 return ret;
11211}
11212
11213static void free_cntrs(struct hfi1_devdata *dd)
11214{
11215 struct hfi1_pportdata *ppd;
11216 int i;
11217
11218 if (dd->synth_stats_timer.data)
11219 del_timer_sync(&dd->synth_stats_timer);
11220 dd->synth_stats_timer.data = 0;
11221 ppd = (struct hfi1_pportdata *)(dd + 1);
11222 for (i = 0; i < dd->num_pports; i++, ppd++) {
11223 kfree(ppd->cntrs);
11224 kfree(ppd->scntrs);
11225 free_percpu(ppd->ibport_data.rc_acks);
11226 free_percpu(ppd->ibport_data.rc_qacks);
11227 free_percpu(ppd->ibport_data.rc_delayed_comp);
11228 ppd->cntrs = NULL;
11229 ppd->scntrs = NULL;
11230 ppd->ibport_data.rc_acks = NULL;
11231 ppd->ibport_data.rc_qacks = NULL;
11232 ppd->ibport_data.rc_delayed_comp = NULL;
11233 }
11234 kfree(dd->portcntrnames);
11235 dd->portcntrnames = NULL;
11236 kfree(dd->cntrs);
11237 dd->cntrs = NULL;
11238 kfree(dd->scntrs);
11239 dd->scntrs = NULL;
11240 kfree(dd->cntrnames);
11241 dd->cntrnames = NULL;
11242}
11243
11244#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11245#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11246
11247static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11248 u64 *psval, void *context, int vl)
11249{
11250 u64 val;
11251 u64 sval = *psval;
11252
11253 if (entry->flags & CNTR_DISABLED) {
11254 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11255 return 0;
11256 }
11257
11258 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11259
11260 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11261
11262 /* If it's a synthetic counter there is more work we need to do */
11263 if (entry->flags & CNTR_SYNTH) {
11264 if (sval == CNTR_MAX) {
11265 /* No need to read already saturated */
11266 return CNTR_MAX;
11267 }
11268
11269 if (entry->flags & CNTR_32BIT) {
11270 /* 32bit counters can wrap multiple times */
11271 u64 upper = sval >> 32;
11272 u64 lower = (sval << 32) >> 32;
11273
11274 if (lower > val) { /* hw wrapped */
11275 if (upper == CNTR_32BIT_MAX)
11276 val = CNTR_MAX;
11277 else
11278 upper++;
11279 }
11280
11281 if (val != CNTR_MAX)
11282 val = (upper << 32) | val;
11283
11284 } else {
11285 /* If we rolled we are saturated */
11286 if ((val < sval) || (val > CNTR_MAX))
11287 val = CNTR_MAX;
11288 }
11289 }
11290
11291 *psval = val;
11292
11293 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11294
11295 return val;
11296}
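/*
 * Example of the 32-bit wrap handling above (values are illustrative):
 * if the saved value is 0x10000fff0 (upper = 1, lower = 0xfff0) and the
 * hardware now reads 0x10, then lower > val signals a wrap, upper is
 * bumped to 2, and the returned synthetic value is 0x200000010.
 */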
11297
11298static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11299 struct cntr_entry *entry,
11300 u64 *psval, void *context, int vl, u64 data)
11301{
11302 u64 val;
11303
11304 if (entry->flags & CNTR_DISABLED) {
11305 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11306 return 0;
11307 }
11308
11309 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11310
11311 if (entry->flags & CNTR_SYNTH) {
11312 *psval = data;
11313 if (entry->flags & CNTR_32BIT) {
11314 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11315 (data << 32) >> 32);
11316 val = data; /* return the full 64bit value */
11317 } else {
11318 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11319 data);
11320 }
11321 } else {
11322 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11323 }
11324
11325 *psval = val;
11326
11327 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11328
11329 return val;
11330}
11331
11332u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11333{
11334 struct cntr_entry *entry;
11335 u64 *sval;
11336
11337 entry = &dev_cntrs[index];
11338 sval = dd->scntrs + entry->offset;
11339
11340 if (vl != CNTR_INVALID_VL)
11341 sval += vl;
11342
11343 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11344}
11345
11346u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11347{
11348 struct cntr_entry *entry;
11349 u64 *sval;
11350
11351 entry = &dev_cntrs[index];
11352 sval = dd->scntrs + entry->offset;
11353
11354 if (vl != CNTR_INVALID_VL)
11355 sval += vl;
11356
11357 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11358}
11359
11360u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11361{
11362 struct cntr_entry *entry;
11363 u64 *sval;
11364
11365 entry = &port_cntrs[index];
11366 sval = ppd->scntrs + entry->offset;
11367
11368 if (vl != CNTR_INVALID_VL)
11369 sval += vl;
11370
11371 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11372 (index <= C_RCV_HDR_OVF_LAST)) {
11373 /* We do not want to bother for disabled contexts */
11374 return 0;
11375 }
11376
11377 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11378}
11379
11380u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11381{
11382 struct cntr_entry *entry;
11383 u64 *sval;
11384
11385 entry = &port_cntrs[index];
11386 sval = ppd->scntrs + entry->offset;
11387
11388 if (vl != CNTR_INVALID_VL)
11389 sval += vl;
11390
11391 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11392 (index <= C_RCV_HDR_OVF_LAST)) {
11393 /* We do not want to bother for disabled contexts */
11394 return 0;
11395 }
11396
11397 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11398}
11399
11400static void update_synth_timer(unsigned long opaque)
11401{
11402 u64 cur_tx;
11403 u64 cur_rx;
11404 u64 total_flits;
11405 u8 update = 0;
11406 int i, j, vl;
11407 struct hfi1_pportdata *ppd;
11408 struct cntr_entry *entry;
11409
11410 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11411
11412 /*
11413 * Rather than keep beating on the CSRs, pick a minimal set that we can
11414 * check to watch for potential rollover. We can do this by looking at
11415 * the number of flits sent/received. If the total flits exceeds 32 bits
11416 * then we have to iterate all the counters and update.
11417 */
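	/*
	 * E.g. (illustrative counts): if the last snapshot had tx = 100 and
	 * the current read is 90, a rollover is assumed and every counter is
	 * refreshed; otherwise only the combined tx/rx delta is compared
	 * against the 32-bit limit.
	 */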
11418 entry = &dev_cntrs[C_DC_RCV_FLITS];
11419 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11420
11421 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11422 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11423
11424 hfi1_cdbg(
11425 CNTR,
11426 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11427 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11428
11429 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11430 /*
11431 * May not be strictly necessary to update but it won't hurt and
11432 * simplifies the logic here.
11433 */
11434 update = 1;
11435 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11436 dd->unit);
11437 } else {
11438 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11439 hfi1_cdbg(CNTR,
11440 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11441 total_flits, (u64)CNTR_32BIT_MAX);
11442 if (total_flits >= CNTR_32BIT_MAX) {
11443 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11444 dd->unit);
11445 update = 1;
11446 }
11447 }
11448
11449 if (update) {
11450 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11451 for (i = 0; i < DEV_CNTR_LAST; i++) {
11452 entry = &dev_cntrs[i];
11453 if (entry->flags & CNTR_VL) {
11454 for (vl = 0; vl < C_VL_COUNT; vl++)
11455 read_dev_cntr(dd, i, vl);
11456 } else {
11457 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11458 }
11459 }
11460 ppd = (struct hfi1_pportdata *)(dd + 1);
11461 for (i = 0; i < dd->num_pports; i++, ppd++) {
11462 for (j = 0; j < PORT_CNTR_LAST; j++) {
11463 entry = &port_cntrs[j];
11464 if (entry->flags & CNTR_VL) {
11465 for (vl = 0; vl < C_VL_COUNT; vl++)
11466 read_port_cntr(ppd, j, vl);
11467 } else {
11468 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11469 }
11470 }
11471 }
11472
11473 /*
11474 * We want the value in the register. The goal is to keep track
11475 * of the number of "ticks" not the counter value. In other
11476 * words if the register rolls we want to notice it and go ahead
11477 * and force an update.
11478 */
11479 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11480 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11481 CNTR_MODE_R, 0);
11482
11483 entry = &dev_cntrs[C_DC_RCV_FLITS];
11484 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11485 CNTR_MODE_R, 0);
11486
11487 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11488 dd->unit, dd->last_tx, dd->last_rx);
11489
11490 } else {
11491 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11492 }
11493
11494	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11495}
11496
11497#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11498static int init_cntrs(struct hfi1_devdata *dd)
11499{
11500 int i, rcv_ctxts, index, j;
11501 size_t sz;
11502 char *p;
11503 char name[C_MAX_NAME];
11504 struct hfi1_pportdata *ppd;
11505
11506 /* set up the stats timer; the add_timer is done at the end */
11507 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11508 (unsigned long)dd);
11509
11510 /***********************/
11511 /* per device counters */
11512 /***********************/
11513
11514 /* size names and determine how many we have */
11515 dd->ndevcntrs = 0;
11516 sz = 0;
11517 index = 0;
11518
11519 for (i = 0; i < DEV_CNTR_LAST; i++) {
11520 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11521 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11522 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11523 continue;
11524 }
11525
11526 if (dev_cntrs[i].flags & CNTR_VL) {
11527 hfi1_dbg_early("\tProcessing VL cntr\n");
11528 dev_cntrs[i].offset = index;
11529 for (j = 0; j < C_VL_COUNT; j++) {
11530 memset(name, '\0', C_MAX_NAME);
11531 snprintf(name, C_MAX_NAME, "%s%d",
11532 dev_cntrs[i].name,
11533 vl_from_idx(j));
11534 sz += strlen(name);
11535 sz++;
11536 hfi1_dbg_early("\t\t%s\n", name);
11537 dd->ndevcntrs++;
11538 index++;
11539 }
11540 } else {
11541 /* +1 for newline */
11542 sz += strlen(dev_cntrs[i].name) + 1;
11543 dd->ndevcntrs++;
11544 dev_cntrs[i].offset = index;
11545 index++;
11546 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11547 }
11548 }
11549
11550 /* allocate space for the counter values */
11551 dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11552 if (!dd->cntrs)
11553 goto bail;
11554
11555 dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
11556 if (!dd->scntrs)
11557 goto bail;
11558
11559
11560 /* allocate space for the counter names */
11561 dd->cntrnameslen = sz;
11562 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11563 if (!dd->cntrnames)
11564 goto bail;
11565
11566 /* fill in the names */
11567 for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) {
11568 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11569 /* Nothing */
11570 } else {
11571 if (dev_cntrs[i].flags & CNTR_VL) {
11572 for (j = 0; j < C_VL_COUNT; j++) {
11573 memset(name, '\0', C_MAX_NAME);
11574 snprintf(name, C_MAX_NAME, "%s%d",
11575 dev_cntrs[i].name,
11576 vl_from_idx(j));
11577 memcpy(p, name, strlen(name));
11578 p += strlen(name);
11579 *p++ = '\n';
11580 }
11581 } else {
11582 memcpy(p, dev_cntrs[i].name,
11583 strlen(dev_cntrs[i].name));
11584 p += strlen(dev_cntrs[i].name);
11585 *p++ = '\n';
11586 }
11587 index++;
11588 }
11589 }
11590
11591 /*********************/
11592 /* per port counters */
11593 /*********************/
11594
11595 /*
11596 * Go through the counters for the overflows and disable the ones we
11597 * don't need. This varies based on platform so we need to do it
11598 * dynamically here.
11599 */
11600 rcv_ctxts = dd->num_rcv_contexts;
11601 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11602 i <= C_RCV_HDR_OVF_LAST; i++) {
11603 port_cntrs[i].flags |= CNTR_DISABLED;
11604 }
11605
11606 /* size port counter names and determine how many we have */
11607 sz = 0;
11608 dd->nportcntrs = 0;
11609 for (i = 0; i < PORT_CNTR_LAST; i++) {
11610 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11611 if (port_cntrs[i].flags & CNTR_DISABLED) {
11612 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11613 continue;
11614 }
11615
11616 if (port_cntrs[i].flags & CNTR_VL) {
11617 hfi1_dbg_early("\tProcessing VL cntr\n");
11618 port_cntrs[i].offset = dd->nportcntrs;
11619 for (j = 0; j < C_VL_COUNT; j++) {
11620 memset(name, '\0', C_MAX_NAME);
11621 snprintf(name, C_MAX_NAME, "%s%d",
11622 port_cntrs[i].name,
11623 vl_from_idx(j));
11624 sz += strlen(name);
11625 sz++;
11626 hfi1_dbg_early("\t\t%s\n", name);
11627 dd->nportcntrs++;
11628 }
11629 } else {
11630 /* +1 for newline */
11631 sz += strlen(port_cntrs[i].name) + 1;
11632 port_cntrs[i].offset = dd->nportcntrs;
11633 dd->nportcntrs++;
11634 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11635 }
11636 }
11637
11638 /* allocate space for the counter names */
11639 dd->portcntrnameslen = sz;
11640 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11641 if (!dd->portcntrnames)
11642 goto bail;
11643
11644 /* fill in port cntr names */
11645 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11646 if (port_cntrs[i].flags & CNTR_DISABLED)
11647 continue;
11648
11649 if (port_cntrs[i].flags & CNTR_VL) {
11650 for (j = 0; j < C_VL_COUNT; j++) {
11651 memset(name, '\0', C_MAX_NAME);
11652 snprintf(name, C_MAX_NAME, "%s%d",
11653 port_cntrs[i].name,
11654 vl_from_idx(j));
11655 memcpy(p, name, strlen(name));
11656 p += strlen(name);
11657 *p++ = '\n';
11658 }
11659 } else {
11660 memcpy(p, port_cntrs[i].name,
11661 strlen(port_cntrs[i].name));
11662 p += strlen(port_cntrs[i].name);
11663 *p++ = '\n';
11664 }
11665 }
11666
11667 /* allocate per port storage for counter values */
11668 ppd = (struct hfi1_pportdata *)(dd + 1);
11669 for (i = 0; i < dd->num_pports; i++, ppd++) {
11670 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11671 if (!ppd->cntrs)
11672 goto bail;
11673
11674 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11675 if (!ppd->scntrs)
11676 goto bail;
11677 }
11678
11679 /* CPU counters need to be allocated and zeroed */
11680 if (init_cpu_counters(dd))
11681 goto bail;
11682
11683 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11684 return 0;
11685bail:
11686 free_cntrs(dd);
11687 return -ENOMEM;
11688}
11689
11690
11691static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11692{
11693 switch (chip_lstate) {
11694 default:
11695 dd_dev_err(dd,
11696 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11697 chip_lstate);
11698 /* fall through */
11699 case LSTATE_DOWN:
11700 return IB_PORT_DOWN;
11701 case LSTATE_INIT:
11702 return IB_PORT_INIT;
11703 case LSTATE_ARMED:
11704 return IB_PORT_ARMED;
11705 case LSTATE_ACTIVE:
11706 return IB_PORT_ACTIVE;
11707 }
11708}
11709
11710u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11711{
11712 /* look at the HFI meta-states only */
11713 switch (chip_pstate & 0xf0) {
11714 default:
11715 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11716 chip_pstate);
11717 /* fall through */
11718 case PLS_DISABLED:
11719 return IB_PORTPHYSSTATE_DISABLED;
11720 case PLS_OFFLINE:
11721 return OPA_PORTPHYSSTATE_OFFLINE;
11722 case PLS_POLLING:
11723 return IB_PORTPHYSSTATE_POLLING;
11724 case PLS_CONFIGPHY:
11725 return IB_PORTPHYSSTATE_TRAINING;
11726 case PLS_LINKUP:
11727 return IB_PORTPHYSSTATE_LINKUP;
11728 case PLS_PHYTEST:
11729 return IB_PORTPHYSSTATE_PHY_TEST;
11730 }
11731}
11732
11733/* return the OPA port logical state name */
11734const char *opa_lstate_name(u32 lstate)
11735{
11736 static const char * const port_logical_names[] = {
11737 "PORT_NOP",
11738 "PORT_DOWN",
11739 "PORT_INIT",
11740 "PORT_ARMED",
11741 "PORT_ACTIVE",
11742 "PORT_ACTIVE_DEFER",
11743 };
11744 if (lstate < ARRAY_SIZE(port_logical_names))
11745 return port_logical_names[lstate];
11746 return "unknown";
11747}
11748
11749/* return the OPA port physical state name */
11750const char *opa_pstate_name(u32 pstate)
11751{
11752 static const char * const port_physical_names[] = {
11753 "PHYS_NOP",
11754 "reserved1",
11755 "PHYS_POLL",
11756 "PHYS_DISABLED",
11757 "PHYS_TRAINING",
11758 "PHYS_LINKUP",
11759 "PHYS_LINK_ERR_RECOVER",
11760 "PHYS_PHY_TEST",
11761 "reserved8",
11762 "PHYS_OFFLINE",
11763 "PHYS_GANGED",
11764 "PHYS_TEST",
11765 };
11766 if (pstate < ARRAY_SIZE(port_physical_names))
11767 return port_physical_names[pstate];
11768 return "unknown";
11769}
11770
11771/*
11772 * Read the hardware link state and set the driver's cached value of it.
11773 * Return the (new) current value.
11774 */
11775u32 get_logical_state(struct hfi1_pportdata *ppd)
11776{
11777 u32 new_state;
11778
11779 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11780 if (new_state != ppd->lstate) {
11781 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11782 opa_lstate_name(new_state), new_state);
11783 ppd->lstate = new_state;
11784 }
11785 /*
11786 * Set port status flags in the page mapped into userspace
11787 * memory. Do it here to ensure a reliable state - this is
11788 * the only function called by all state handling code.
11789	 * Always set the flags because the cached value
11790 * might have been changed explicitly outside of this
11791 * function.
11792 */
11793 if (ppd->statusp) {
11794 switch (ppd->lstate) {
11795 case IB_PORT_DOWN:
11796 case IB_PORT_INIT:
11797 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11798 HFI1_STATUS_IB_READY);
11799 break;
11800 case IB_PORT_ARMED:
11801 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11802 break;
11803 case IB_PORT_ACTIVE:
11804 *ppd->statusp |= HFI1_STATUS_IB_READY;
11805 break;
11806 }
11807 }
11808 return ppd->lstate;
11809}
11810
11811/**
11812 * wait_logical_linkstate - wait for an IB link state change to occur
11813 * @ppd: port device
11814 * @state: the state to wait for
11815 * @msecs: the number of milliseconds to wait
11816 *
11817 * Wait up to msecs milliseconds for IB link state change to occur.
11818 * For now, take the easy polling route.
11819 * Returns 0 if state reached, otherwise -ETIMEDOUT.
11820 */
11821static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11822 int msecs)
11823{
11824 unsigned long timeout;
11825
11826 timeout = jiffies + msecs_to_jiffies(msecs);
11827 while (1) {
11828 if (get_logical_state(ppd) == state)
11829 return 0;
11830 if (time_after(jiffies, timeout))
11831 break;
11832 msleep(20);
11833 }
11834 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11835
11836 return -ETIMEDOUT;
11837}
11838
11839u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11840{
11841 static u32 remembered_state = 0xff;
11842 u32 pstate;
11843 u32 ib_pstate;
11844
11845 pstate = read_physical_state(ppd->dd);
11846 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11847 if (remembered_state != ib_pstate) {
11848 dd_dev_info(ppd->dd,
11849 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
11850 __func__, opa_pstate_name(ib_pstate), ib_pstate,
11851 pstate);
11852 remembered_state = ib_pstate;
11853 }
11854 return ib_pstate;
11855}
11856
11857/*
11858 * Read/modify/write ASIC_QSFP register bits as selected by mask
11859 * data: 0 or 1 in the positions depending on what needs to be written
11860 * dir: 0 for read, 1 for write
11861 * mask: select by setting
11862 * I2CCLK (bit 0)
11863 * I2CDATA (bit 1)
11864 */
11865u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
11866 u32 mask)
11867{
11868 u64 qsfp_oe, target_oe;
11869
11870 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
11871 if (mask) {
11872 /* We are writing register bits, so lock access */
11873 dir &= mask;
11874 data &= mask;
11875
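		/*
		 * Note: only the output-enable CSR is updated below; the masked
		 * data value is not written to any register here, so the level
		 * actually driven on an enabled pin comes from the separately
		 * programmed ASIC_QSFPn_OUT CSR (init_chip() presets it).
		 */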
11876 qsfp_oe = read_csr(dd, target_oe);
11877 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
11878 write_csr(dd, target_oe, qsfp_oe);
11879 }
11880 /* We are exclusively reading bits here, but it is unlikely
11881 * we'll get valid data when we set the direction of the pin
11882	 * in the same call, so a caller that changed the pin direction
11883	 * should call this function again to read valid data
11884 */
11885 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
11886}
11887
11888#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
11889(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11890
11891#define SET_STATIC_RATE_CONTROL_SMASK(r) \
11892(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
11893
11894int hfi1_init_ctxt(struct send_context *sc)
11895{
11896 if (sc != NULL) {
11897 struct hfi1_devdata *dd = sc->dd;
11898 u64 reg;
11899 u8 set = (sc->type == SC_USER ?
11900 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
11901 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
11902 reg = read_kctxt_csr(dd, sc->hw_context,
11903 SEND_CTXT_CHECK_ENABLE);
11904 if (set)
11905 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
11906 else
11907 SET_STATIC_RATE_CONTROL_SMASK(reg);
11908 write_kctxt_csr(dd, sc->hw_context,
11909 SEND_CTXT_CHECK_ENABLE, reg);
11910 }
11911 return 0;
11912}
11913
11914int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
11915{
11916 int ret = 0;
11917 u64 reg;
11918
11919 if (dd->icode != ICODE_RTL_SILICON) {
11920 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11921 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
11922 __func__);
11923 return -EINVAL;
11924 }
11925 reg = read_csr(dd, ASIC_STS_THERM);
11926 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
11927 ASIC_STS_THERM_CURR_TEMP_MASK);
11928 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
11929 ASIC_STS_THERM_LO_TEMP_MASK);
11930 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
11931 ASIC_STS_THERM_HI_TEMP_MASK);
11932 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
11933 ASIC_STS_THERM_CRIT_TEMP_MASK);
11934 /* triggers is a 3-bit value - 1 bit per trigger. */
11935 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
11936
11937 return ret;
11938}
11939
11940/* ========================================================================= */
11941
11942/*
11943 * Enable/disable chip from delivering interrupts.
11944 */
11945void set_intr_state(struct hfi1_devdata *dd, u32 enable)
11946{
11947 int i;
11948
11949 /*
11950 * In HFI, the mask needs to be 1 to allow interrupts.
11951 */
11952 if (enable) {
11953 u64 cce_int_mask;
11954 const int qsfp1_int_smask = QSFP1_INT % 64;
11955 const int qsfp2_int_smask = QSFP2_INT % 64;
11956
11957 /* enable all interrupts */
11958 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11959 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
11960
11961 /*
11962 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
11963 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
11964 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
11965 * the index of the appropriate CSR in the CCEIntMask CSR array
11966 */
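		/*
		 * Worked example with made-up numbers: if QSFP1_INT were source
		 * 129, both QSFP bits would live in CCE_INT_MASK CSR 129/64 = 2
		 * and be cleared at bit positions 129%64 = 1 and 130%64 = 2.
		 */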
11967 cce_int_mask = read_csr(dd, CCE_INT_MASK +
11968 (8*(QSFP1_INT/64)));
11969 if (dd->hfi1_id) {
11970 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
11971 write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
11972 cce_int_mask);
11973 } else {
11974 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
11975 write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
11976 cce_int_mask);
11977 }
11978 } else {
11979 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11980 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
11981 }
11982}
11983
11984/*
11985 * Clear all interrupt sources on the chip.
11986 */
11987static void clear_all_interrupts(struct hfi1_devdata *dd)
11988{
11989 int i;
11990
11991 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
11992 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
11993
11994 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
11995 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
11996 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
11997 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
11998 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
11999 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12000 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12001 for (i = 0; i < dd->chip_send_contexts; i++)
12002 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12003 for (i = 0; i < dd->chip_sdma_engines; i++)
12004 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12005
12006 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12007 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12008 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12009}
12010
12011/* Move to pcie.c? */
12012static void disable_intx(struct pci_dev *pdev)
12013{
12014 pci_intx(pdev, 0);
12015}
12016
12017static void clean_up_interrupts(struct hfi1_devdata *dd)
12018{
12019 int i;
12020
12021 /* remove irqs - must happen before disabling/turning off */
12022 if (dd->num_msix_entries) {
12023 /* MSI-X */
12024 struct hfi1_msix_entry *me = dd->msix_entries;
12025
12026 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12027 if (me->arg == NULL) /* => no irq, no affinity */
12028 break;
12029 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12030 NULL);
12031 free_irq(me->msix.vector, me->arg);
12032 }
12033 } else {
12034 /* INTx */
12035 if (dd->requested_intx_irq) {
12036 free_irq(dd->pcidev->irq, dd);
12037 dd->requested_intx_irq = 0;
12038 }
12039 }
12040
12041 /* turn off interrupts */
12042 if (dd->num_msix_entries) {
12043 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012044 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012045 } else {
12046 /* INTx */
12047 disable_intx(dd->pcidev);
12048 }
12049
12050 /* clean structures */
12051 for (i = 0; i < dd->num_msix_entries; i++)
12052 free_cpumask_var(dd->msix_entries[i].mask);
12053 kfree(dd->msix_entries);
12054 dd->msix_entries = NULL;
12055 dd->num_msix_entries = 0;
12056}
12057
12058/*
12059 * Remap the interrupt source from the general handler to the given MSI-X
12060 * interrupt.
12061 */
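/*
 * Worked example (the numbers are illustrative; real source numbers come from
 * the IS_* tables): source 70 is cleared from general-handler mask word
 * 70/64 = 1, bit 70%64 = 6, and is steered via CCE_INT_MAP CSR 70/8 = 8,
 * byte 70%8 = 6.
 */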
12062static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12063{
12064 u64 reg;
12065 int m, n;
12066
12067 /* clear from the handled mask of the general interrupt */
12068 m = isrc / 64;
12069 n = isrc % 64;
12070 dd->gi_mask[m] &= ~((u64)1 << n);
12071
12072 /* direct the chip source to the given MSI-X interrupt */
12073 m = isrc / 8;
12074 n = isrc % 8;
12075 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12076 reg &= ~((u64)0xff << (8*n));
12077 reg |= ((u64)msix_intr & 0xff) << (8*n);
12078 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12079}
12080
12081static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12082 int engine, int msix_intr)
12083{
12084 /*
12085 * SDMA engine interrupt sources grouped by type, rather than
12086 * engine. Per-engine interrupts are as follows:
12087 * SDMA
12088 * SDMAProgress
12089 * SDMAIdle
12090 */
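	/*
	 * e.g. with TXE_NUM_SDMA_ENGINES of 16 (illustrative value), engine 3
	 * owns sources IS_SDMA_START + 3, + 19 and + 35, all steered to the
	 * same MSI-X vector below.
	 */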
12091 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12092 msix_intr);
12093 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12094 msix_intr);
12095 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12096 msix_intr);
12097}
12098
Mike Marciniszyn77241052015-07-30 15:17:43 -040012099static int request_intx_irq(struct hfi1_devdata *dd)
12100{
12101 int ret;
12102
Jubin John98050712015-11-16 21:59:27 -050012103 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12104 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012105 ret = request_irq(dd->pcidev->irq, general_interrupt,
12106 IRQF_SHARED, dd->intx_name, dd);
12107 if (ret)
12108 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12109 ret);
12110 else
12111 dd->requested_intx_irq = 1;
12112 return ret;
12113}
12114
12115static int request_msix_irqs(struct hfi1_devdata *dd)
12116{
12117 const struct cpumask *local_mask;
12118 cpumask_var_t def, rcv;
12119 bool def_ret, rcv_ret;
12120 int first_general, last_general;
12121 int first_sdma, last_sdma;
12122 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012123 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012124 int rcv_cpu, sdma_cpu;
12125 int i, ret = 0, possible;
12126 int ht;
12127
12128 /* calculate the ranges we are going to use */
12129 first_general = 0;
12130 first_sdma = last_general = first_general + 1;
12131 first_rx = last_sdma = first_sdma + dd->num_sdma;
12132 last_rx = first_rx + dd->n_krcv_queues;
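	/*
	 * Purely illustrative layout: with 16 SDMA engines and 8 kernel
	 * receive queues this gives vector 0 = general, vectors 1-16 = SDMA
	 * and vectors 17-24 = receive contexts.
	 */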
12133
12134 /*
12135 * Interrupt affinity.
12136 *
12137	 * Non receive-available interrupts get a default mask that
12138	 * starts as the possible CPUs of the local node, with hyperthread
12139	 * siblings and each receive-available CPU removed.
12140	 *
12141	 * Receive-available interrupts start at node-relative CPU 1 and
12142	 * wrap back to node-relative CPU 1 as necessary.
12143 *
12144 */
12145 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12146 /* if first cpu is invalid, use NUMA 0 */
12147 if (cpumask_first(local_mask) >= nr_cpu_ids)
12148 local_mask = topology_core_cpumask(0);
12149
12150 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12151 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12152 if (!def_ret || !rcv_ret)
12153 goto bail;
12154 /* use local mask as default */
12155 cpumask_copy(def, local_mask);
12156 possible = cpumask_weight(def);
12157 /* disarm threads from default */
12158 ht = cpumask_weight(
12159 topology_sibling_cpumask(cpumask_first(local_mask)));
12160 for (i = possible/ht; i < possible; i++)
12161 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012162 /* def now has full cores on chosen node*/
12163 first_cpu = cpumask_first(def);
12164 if (nr_cpu_ids >= first_cpu)
12165 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012166 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012167
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012168 /* One context is reserved as control context */
12169 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012170 cpumask_clear_cpu(curr_cpu, def);
12171 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012172 curr_cpu = cpumask_next(curr_cpu, def);
12173 if (curr_cpu >= nr_cpu_ids)
12174 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012175 }
12176 /* def mask has non-rcv, rcv has recv mask */
12177 rcv_cpu = cpumask_first(rcv);
12178 sdma_cpu = cpumask_first(def);
12179
12180 /*
12181 * Sanity check - the code expects all SDMA chip source
12182 * interrupts to be in the same CSR, starting at bit 0. Verify
12183 * that this is true by checking the bit location of the start.
12184 */
12185 BUILD_BUG_ON(IS_SDMA_START % 64);
12186
12187 for (i = 0; i < dd->num_msix_entries; i++) {
12188 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12189 const char *err_info;
12190 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012191 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012192 void *arg;
12193 int idx;
12194 struct hfi1_ctxtdata *rcd = NULL;
12195 struct sdma_engine *sde = NULL;
12196
12197 /* obtain the arguments to request_irq */
12198 if (first_general <= i && i < last_general) {
12199 idx = i - first_general;
12200 handler = general_interrupt;
12201 arg = dd;
12202 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012203 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012204 err_info = "general";
12205 } else if (first_sdma <= i && i < last_sdma) {
12206 idx = i - first_sdma;
12207 sde = &dd->per_sdma[idx];
12208 handler = sdma_interrupt;
12209 arg = sde;
12210 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012211 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012212 err_info = "sdma";
12213 remap_sdma_interrupts(dd, idx, i);
12214 } else if (first_rx <= i && i < last_rx) {
12215 idx = i - first_rx;
12216 rcd = dd->rcd[idx];
12217 /* no interrupt if no rcd */
12218 if (!rcd)
12219 continue;
12220 /*
12221 * Set the interrupt register and mask for this
12222 * context's interrupt.
12223 */
12224 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12225 rcd->imask = ((u64)1) <<
12226 ((IS_RCVAVAIL_START+idx) % 64);
12227 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012228 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012229 arg = rcd;
12230 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012231 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012232 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012233 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012234 } else {
12235 /* not in our expected range - complain, then
12236 ignore it */
12237 dd_dev_err(dd,
12238 "Unexpected extra MSI-X interrupt %d\n", i);
12239 continue;
12240 }
12241 /* no argument, no interrupt */
12242 if (arg == NULL)
12243 continue;
12244 /* make sure the name is terminated */
12245 me->name[sizeof(me->name)-1] = 0;
12246
Dean Luickf4f30031c2015-10-26 10:28:44 -040012247 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12248 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012249 if (ret) {
12250 dd_dev_err(dd,
12251 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12252 err_info, me->msix.vector, idx, ret);
12253 return ret;
12254 }
12255 /*
12256 * assign arg after request_irq call, so it will be
12257 * cleaned up
12258 */
12259 me->arg = arg;
12260
12261 if (!zalloc_cpumask_var(
12262 &dd->msix_entries[i].mask,
12263 GFP_KERNEL))
12264 goto bail;
12265 if (handler == sdma_interrupt) {
12266 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12267 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012268 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012269 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12270 sdma_cpu = cpumask_next(sdma_cpu, def);
12271 if (sdma_cpu >= nr_cpu_ids)
12272 sdma_cpu = cpumask_first(def);
12273 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012274 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12275 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12276 cpumask_first(def) : rcv_cpu);
12277 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12278 /* map to first default */
12279 cpumask_set_cpu(cpumask_first(def),
12280 dd->msix_entries[i].mask);
12281 } else {
12282 cpumask_set_cpu(rcv_cpu,
12283 dd->msix_entries[i].mask);
12284 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12285 if (rcv_cpu >= nr_cpu_ids)
12286 rcv_cpu = cpumask_first(rcv);
12287 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012288 } else {
12289 /* otherwise first def */
12290 dd_dev_info(dd, "%s cpu %d\n",
12291 err_info, cpumask_first(def));
12292 cpumask_set_cpu(
12293 cpumask_first(def), dd->msix_entries[i].mask);
12294 }
12295 irq_set_affinity_hint(
12296 dd->msix_entries[i].msix.vector,
12297 dd->msix_entries[i].mask);
12298 }
12299
12300out:
12301 free_cpumask_var(def);
12302 free_cpumask_var(rcv);
12303 return ret;
12304bail:
12305 ret = -ENOMEM;
12306 goto out;
12307}
12308
12309/*
12310 * Set the general handler to accept all interrupts, remap all
12311 * chip interrupts back to MSI-X 0.
12312 */
12313static void reset_interrupts(struct hfi1_devdata *dd)
12314{
12315 int i;
12316
12317 /* all interrupts handled by the general handler */
12318 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12319 dd->gi_mask[i] = ~(u64)0;
12320
12321 /* all chip interrupts map to MSI-X 0 */
12322 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12323 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12324}
12325
12326static int set_up_interrupts(struct hfi1_devdata *dd)
12327{
12328 struct hfi1_msix_entry *entries;
12329 u32 total, request;
12330 int i, ret;
12331 int single_interrupt = 0; /* we expect to have all the interrupts */
12332
12333 /*
12334 * Interrupt count:
12335 * 1 general, "slow path" interrupt (includes the SDMA engines
12336 * slow source, SDMACleanupDone)
12337 * N interrupts - one per used SDMA engine
12338 * M interrupt - one per kernel receive context
12339 */
12340 total = 1 + dd->num_sdma + dd->n_krcv_queues;
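	/* e.g. 1 + 16 SDMA engines + 8 kernel rcv queues = 25 (illustrative) */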
12341
12342 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12343 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012344 ret = -ENOMEM;
12345 goto fail;
12346 }
12347 /* 1-1 MSI-X entry assignment */
12348 for (i = 0; i < total; i++)
12349 entries[i].msix.entry = i;
12350
12351 /* ask for MSI-X interrupts */
12352 request = total;
12353 request_msix(dd, &request, entries);
12354
12355 if (request == 0) {
12356 /* using INTx */
12357 /* dd->num_msix_entries already zero */
12358 kfree(entries);
12359 single_interrupt = 1;
12360 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12361 } else {
12362 /* using MSI-X */
12363 dd->num_msix_entries = request;
12364 dd->msix_entries = entries;
12365
12366 if (request != total) {
12367 /* using MSI-X, with reduced interrupts */
12368 dd_dev_err(
12369 dd,
12370 "cannot handle reduced interrupt case, want %u, got %u\n",
12371 total, request);
12372 ret = -EINVAL;
12373 goto fail;
12374 }
12375 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12376 }
12377
12378 /* mask all interrupts */
12379 set_intr_state(dd, 0);
12380 /* clear all pending interrupts */
12381 clear_all_interrupts(dd);
12382
12383 /* reset general handler mask, chip MSI-X mappings */
12384 reset_interrupts(dd);
12385
12386 if (single_interrupt)
12387 ret = request_intx_irq(dd);
12388 else
12389 ret = request_msix_irqs(dd);
12390 if (ret)
12391 goto fail;
12392
12393 return 0;
12394
12395fail:
12396 clean_up_interrupts(dd);
12397 return ret;
12398}
12399
12400/*
12401 * Set up context values in dd. Sets:
12402 *
12403 * num_rcv_contexts - number of contexts being used
12404 * n_krcv_queues - number of kernel contexts
12405 * first_user_ctxt - first non-kernel context in array of contexts
12406 * freectxts - number of free user contexts
12407 * num_send_contexts - number of PIO send contexts being used
12408 */
12409static int set_up_context_variables(struct hfi1_devdata *dd)
12410{
12411 int num_kernel_contexts;
12412 int num_user_contexts;
12413 int total_contexts;
12414 int ret;
12415 unsigned ngroups;
12416
12417 /*
12418	 * Kernel contexts (to be fixed later):
12419	 * - a minimum of 2, or one context per NUMA node, whichever is larger
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012420 * - Context 0 - control context (VL15/multicast/error)
12421 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012422 */
12423 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012424 /*
12425 * Don't count context 0 in n_krcvqs since
12426		 * it isn't used for normal verbs traffic.
12427 *
12428 * krcvqs will reflect number of kernel
12429 * receive contexts above 0.
12430 */
12431 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012432 else
12433 num_kernel_contexts = num_online_nodes();
12434 num_kernel_contexts =
12435 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12436 /*
12437 * Every kernel receive context needs an ACK send context.
12438	 * One send context is allocated for each VL{0-7} and VL15.
12439 */
12440 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12441 dd_dev_err(dd,
12442 "Reducing # kernel rcv contexts to: %d, from %d\n",
12443 (int)(dd->chip_send_contexts - num_vls - 1),
12444 (int)num_kernel_contexts);
12445 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12446 }
12447 /*
12448 * User contexts: (to be fixed later)
12449 * - set to num_rcv_contexts if non-zero
12450 * - default to 1 user context per CPU
12451 */
12452 if (num_rcv_contexts)
12453 num_user_contexts = num_rcv_contexts;
12454 else
12455 num_user_contexts = num_online_cpus();
12456
12457 total_contexts = num_kernel_contexts + num_user_contexts;
12458
12459 /*
12460 * Adjust the counts given a global max.
12461 */
12462 if (total_contexts > dd->chip_rcv_contexts) {
12463 dd_dev_err(dd,
12464 "Reducing # user receive contexts to: %d, from %d\n",
12465 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12466 (int)num_user_contexts);
12467 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12468 /* recalculate */
12469 total_contexts = num_kernel_contexts + num_user_contexts;
12470 }
12471
12472 /* the first N are kernel contexts, the rest are user contexts */
12473 dd->num_rcv_contexts = total_contexts;
12474 dd->n_krcv_queues = num_kernel_contexts;
12475 dd->first_user_ctxt = num_kernel_contexts;
12476 dd->freectxts = num_user_contexts;
12477 dd_dev_info(dd,
12478 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12479 (int)dd->chip_rcv_contexts,
12480 (int)dd->num_rcv_contexts,
12481 (int)dd->n_krcv_queues,
12482 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12483
12484 /*
12485 * Receive array allocation:
12486 * All RcvArray entries are divided into groups of 8. This
12487 * is required by the hardware and will speed up writes to
12488 * consecutive entries by using write-combining of the entire
12489 * cacheline.
12490 *
12491	 * The number of groups is evenly divided among all contexts;
12492	 * any left-over groups are given to the first N user
12493 * contexts.
12494 */
12495 dd->rcv_entries.group_size = RCV_INCREMENT;
12496 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12497 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12498 dd->rcv_entries.nctxt_extra = ngroups -
12499 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
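	/*
	 * Illustrative arithmetic: 32768 RcvArray entries in groups of 8 give
	 * 4096 groups; with 160 contexts each context gets 25 groups and the
	 * 96 left over go to the first 96 user contexts.
	 */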
12500 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12501 dd->rcv_entries.ngroups,
12502 dd->rcv_entries.nctxt_extra);
12503 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12504 MAX_EAGER_ENTRIES * 2) {
12505 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12506 dd->rcv_entries.group_size;
12507 dd_dev_info(dd,
12508 "RcvArray group count too high, change to %u\n",
12509 dd->rcv_entries.ngroups);
12510 dd->rcv_entries.nctxt_extra = 0;
12511 }
12512 /*
12513 * PIO send contexts
12514 */
12515 ret = init_sc_pools_and_sizes(dd);
12516 if (ret >= 0) { /* success */
12517 dd->num_send_contexts = ret;
12518 dd_dev_info(
12519 dd,
12520 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12521 dd->chip_send_contexts,
12522 dd->num_send_contexts,
12523 dd->sc_sizes[SC_KERNEL].count,
12524 dd->sc_sizes[SC_ACK].count,
12525 dd->sc_sizes[SC_USER].count);
12526 ret = 0; /* success */
12527 }
12528
12529 return ret;
12530}
12531
12532/*
12533 * Set the device/port partition key table. The MAD code
12534 * will ensure that, at least, the partial management
12535 * partition key is present in the table.
12536 */
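/*
 * Each RCV_PARTITION_KEY CSR packs four pkey fields (16 bits each, going by
 * the mask/shift names): pkey i is shifted by (i % 4) field widths and a full
 * register is flushed every fourth key at byte offset (i - 3) * 2, which is
 * the same as ((i - 3) / 4) * 8 whenever i % 4 == 3.
 */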
12537static void set_partition_keys(struct hfi1_pportdata *ppd)
12538{
12539 struct hfi1_devdata *dd = ppd->dd;
12540 u64 reg = 0;
12541 int i;
12542
12543 dd_dev_info(dd, "Setting partition keys\n");
12544 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12545 reg |= (ppd->pkeys[i] &
12546 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12547 ((i % 4) *
12548 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12549 /* Each register holds 4 PKey values. */
12550 if ((i % 4) == 3) {
12551 write_csr(dd, RCV_PARTITION_KEY +
12552 ((i - 3) * 2), reg);
12553 reg = 0;
12554 }
12555 }
12556
12557 /* Always enable HW pkeys check when pkeys table is set */
12558 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12559}
12560
12561/*
12562 * These CSRs and memories are uninitialized on reset and must be
12563 * written before reading to set the ECC/parity bits.
12564 *
12565 * NOTE: All user context CSRs that are not mmaped write-only
12566 * (e.g. the TID flows) must be initialized even if the driver never
12567 * reads them.
12568 */
12569static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12570{
12571 int i, j;
12572
12573 /* CceIntMap */
12574 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12575 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12576
12577 /* SendCtxtCreditReturnAddr */
12578 for (i = 0; i < dd->chip_send_contexts; i++)
12579 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12580
12581 /* PIO Send buffers */
12582 /* SDMA Send buffers */
12583 /* These are not normally read, and (presently) have no method
12584 to be read, so are not pre-initialized */
12585
12586 /* RcvHdrAddr */
12587 /* RcvHdrTailAddr */
12588 /* RcvTidFlowTable */
12589 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12590 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12591 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12592 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12593 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12594 }
12595
12596 /* RcvArray */
12597 for (i = 0; i < dd->chip_rcv_array_count; i++)
12598 write_csr(dd, RCV_ARRAY + (8*i),
12599 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12600
12601 /* RcvQPMapTable */
12602 for (i = 0; i < 32; i++)
12603 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12604}
12605
12606/*
12607 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12608 */
12609static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12610 u64 ctrl_bits)
12611{
12612 unsigned long timeout;
12613 u64 reg;
12614
12615 /* is the condition present? */
12616 reg = read_csr(dd, CCE_STATUS);
12617 if ((reg & status_bits) == 0)
12618 return;
12619
12620 /* clear the condition */
12621 write_csr(dd, CCE_CTRL, ctrl_bits);
12622
12623 /* wait for the condition to clear */
12624 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12625 while (1) {
12626 reg = read_csr(dd, CCE_STATUS);
12627 if ((reg & status_bits) == 0)
12628 return;
12629 if (time_after(jiffies, timeout)) {
12630 dd_dev_err(dd,
12631 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12632 status_bits, reg & status_bits);
12633 return;
12634 }
12635 udelay(1);
12636 }
12637}
12638
12639/* set CCE CSRs to chip reset defaults */
12640static void reset_cce_csrs(struct hfi1_devdata *dd)
12641{
12642 int i;
12643
12644 /* CCE_REVISION read-only */
12645 /* CCE_REVISION2 read-only */
12646 /* CCE_CTRL - bits clear automatically */
12647 /* CCE_STATUS read-only, use CceCtrl to clear */
12648 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12649 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12650 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12651 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12652 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12653 /* CCE_ERR_STATUS read-only */
12654 write_csr(dd, CCE_ERR_MASK, 0);
12655 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12656 /* CCE_ERR_FORCE leave alone */
12657 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12658 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12659 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12660 /* CCE_PCIE_CTRL leave alone */
12661 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12662 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12663 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12664 CCE_MSIX_TABLE_UPPER_RESETCSR);
12665 }
12666 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12667 /* CCE_MSIX_PBA read-only */
12668 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12669 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12670 }
12671 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12672 write_csr(dd, CCE_INT_MAP, 0);
12673 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12674 /* CCE_INT_STATUS read-only */
12675 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12676 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12677 /* CCE_INT_FORCE leave alone */
12678 /* CCE_INT_BLOCKED read-only */
12679 }
12680 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12681 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12682}
12683
12684/* set ASIC CSRs to chip reset defaults */
12685static void reset_asic_csrs(struct hfi1_devdata *dd)
12686{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012687 int i;
12688
12689 /*
12690 * If the HFIs are shared between separate nodes or VMs,
12691 * then more will need to be done here. One idea is a module
12692 * parameter that returns early, letting the first power-on or
12693 * a known first load do the reset and blocking all others.
12694 */
12695
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012696 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12697 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012698
12699 if (dd->icode != ICODE_FPGA_EMULATION) {
12700 /* emulation does not have an SBus - leave these alone */
12701 /*
12702 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12703 * Notes:
12704 * o The reset is not zero if aimed at the core. See the
12705 * SBus documentation for details.
12706 * o If the SBus firmware has been updated (e.g. by the BIOS),
12707 * will the reset revert that?
12708 */
12709 /* ASIC_CFG_SBUS_REQUEST leave alone */
12710 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12711 }
12712 /* ASIC_SBUS_RESULT read-only */
12713 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12714 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12715 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12716 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012717
12718 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012719 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012720
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012721 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012722 /* ASIC_STS_THERM read-only */
12723 /* ASIC_CFG_RESET leave alone */
12724
12725 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12726 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12727 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12728 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12729 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12730 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12731 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12732 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12733 for (i = 0; i < 16; i++)
12734 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12735
12736 /* ASIC_GPIO_IN read-only */
12737 write_csr(dd, ASIC_GPIO_OE, 0);
12738 write_csr(dd, ASIC_GPIO_INVERT, 0);
12739 write_csr(dd, ASIC_GPIO_OUT, 0);
12740 write_csr(dd, ASIC_GPIO_MASK, 0);
12741 /* ASIC_GPIO_STATUS read-only */
12742 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12743 /* ASIC_GPIO_FORCE leave alone */
12744
12745 /* ASIC_QSFP1_IN read-only */
12746 write_csr(dd, ASIC_QSFP1_OE, 0);
12747 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12748 write_csr(dd, ASIC_QSFP1_OUT, 0);
12749 write_csr(dd, ASIC_QSFP1_MASK, 0);
12750 /* ASIC_QSFP1_STATUS read-only */
12751 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12752 /* ASIC_QSFP1_FORCE leave alone */
12753
12754 /* ASIC_QSFP2_IN read-only */
12755 write_csr(dd, ASIC_QSFP2_OE, 0);
12756 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12757 write_csr(dd, ASIC_QSFP2_OUT, 0);
12758 write_csr(dd, ASIC_QSFP2_MASK, 0);
12759 /* ASIC_QSFP2_STATUS read-only */
12760 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12761 /* ASIC_QSFP2_FORCE leave alone */
12762
12763 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12764 /* this also writes a NOP command, clearing paging mode */
12765 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12766 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012767}
12768
12769/* set MISC CSRs to chip reset defaults */
12770static void reset_misc_csrs(struct hfi1_devdata *dd)
12771{
12772 int i;
12773
12774 for (i = 0; i < 32; i++) {
12775 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12776 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12777 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12778 }
12779 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12780	   only be written in 128-byte chunks */
12781 /* init RSA engine to clear lingering errors */
12782 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12783 write_csr(dd, MISC_CFG_RSA_MU, 0);
12784 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12785 /* MISC_STS_8051_DIGEST read-only */
12786 /* MISC_STS_SBM_DIGEST read-only */
12787 /* MISC_STS_PCIE_DIGEST read-only */
12788 /* MISC_STS_FAB_DIGEST read-only */
12789 /* MISC_ERR_STATUS read-only */
12790 write_csr(dd, MISC_ERR_MASK, 0);
12791 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12792 /* MISC_ERR_FORCE leave alone */
12793}
12794
12795/* set TXE CSRs to chip reset defaults */
12796static void reset_txe_csrs(struct hfi1_devdata *dd)
12797{
12798 int i;
12799
12800 /*
12801 * TXE Kernel CSRs
12802 */
12803 write_csr(dd, SEND_CTRL, 0);
12804 __cm_reset(dd, 0); /* reset CM internal state */
12805 /* SEND_CONTEXTS read-only */
12806 /* SEND_DMA_ENGINES read-only */
12807 /* SEND_PIO_MEM_SIZE read-only */
12808 /* SEND_DMA_MEM_SIZE read-only */
12809 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12810 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12811 /* SEND_PIO_ERR_STATUS read-only */
12812 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12813 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12814 /* SEND_PIO_ERR_FORCE leave alone */
12815 /* SEND_DMA_ERR_STATUS read-only */
12816 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12817 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12818 /* SEND_DMA_ERR_FORCE leave alone */
12819 /* SEND_EGRESS_ERR_STATUS read-only */
12820 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12821 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12822 /* SEND_EGRESS_ERR_FORCE leave alone */
12823 write_csr(dd, SEND_BTH_QP, 0);
12824 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12825 write_csr(dd, SEND_SC2VLT0, 0);
12826 write_csr(dd, SEND_SC2VLT1, 0);
12827 write_csr(dd, SEND_SC2VLT2, 0);
12828 write_csr(dd, SEND_SC2VLT3, 0);
12829 write_csr(dd, SEND_LEN_CHECK0, 0);
12830 write_csr(dd, SEND_LEN_CHECK1, 0);
12831 /* SEND_ERR_STATUS read-only */
12832 write_csr(dd, SEND_ERR_MASK, 0);
12833 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12834 /* SEND_ERR_FORCE read-only */
12835 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12836 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12837 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12838 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12839 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12840 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12841 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12842 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12843 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12844 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12845 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12846 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12847 SEND_CM_GLOBAL_CREDIT_RESETCSR);
12848 /* SEND_CM_CREDIT_USED_STATUS read-only */
12849 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12850 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12851 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12852 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12853 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12854 for (i = 0; i < TXE_NUM_DATA_VL; i++)
12855 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12856 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12857 /* SEND_CM_CREDIT_USED_VL read-only */
12858 /* SEND_CM_CREDIT_USED_VL15 read-only */
12859 /* SEND_EGRESS_CTXT_STATUS read-only */
12860 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12861 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12862 /* SEND_EGRESS_ERR_INFO read-only */
12863 /* SEND_EGRESS_ERR_SOURCE read-only */
12864
12865 /*
12866 * TXE Per-Context CSRs
12867 */
12868 for (i = 0; i < dd->chip_send_contexts; i++) {
12869 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12870 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
12871 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12872 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
12873 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
12874 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
12875 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
12876 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
12877 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
12878 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
12879 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
12880 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
12881 }
12882
12883 /*
12884 * TXE Per-SDMA CSRs
12885 */
12886 for (i = 0; i < dd->chip_sdma_engines; i++) {
12887 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
12888 /* SEND_DMA_STATUS read-only */
12889 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
12890 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
12891 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
12892 /* SEND_DMA_HEAD read-only */
12893 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
12894 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
12895 /* SEND_DMA_IDLE_CNT read-only */
12896 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
12897 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
12898 /* SEND_DMA_DESC_FETCHED_CNT read-only */
12899 /* SEND_DMA_ENG_ERR_STATUS read-only */
12900 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
12901 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
12902 /* SEND_DMA_ENG_ERR_FORCE leave alone */
12903 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
12904 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
12905 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
12906 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
12907 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
12908 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
12909 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
12910 }
12911}
12912
12913/*
12914 * Expect on entry:
12915 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
12916 */
12917static void init_rbufs(struct hfi1_devdata *dd)
12918{
12919 u64 reg;
12920 int count;
12921
12922 /*
12923 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
12924 * clear.
12925 */
12926 count = 0;
12927 while (1) {
12928 reg = read_csr(dd, RCV_STATUS);
12929 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
12930 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
12931 break;
12932 /*
12933 * Give up after 1ms - maximum wait time.
12934 *
12935 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
12936 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
12937 * 148 KB / (66% * 250MB/s) = 920us
12938 */
12939 if (count++ > 500) {
12940 dd_dev_err(dd,
12941 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
12942 __func__, reg);
12943 break;
12944 }
12945 udelay(2); /* do not busy-wait the CSR */
12946 }
12947
12948 /* start the init - expect RcvCtrl to be 0 */
12949 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
12950
12951 /*
12952 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
12953 * period after the write before RcvStatus.RxRbufInitDone is valid.
12954 * The delay in the first run through the loop below is sufficient and
12955	 * required before the first read of RcvStatus.RxRbufInitDone.
12956 */
12957 read_csr(dd, RCV_CTRL);
12958
12959 /* wait for the init to finish */
12960 count = 0;
12961 while (1) {
12962 /* delay is required first time through - see above */
12963 udelay(2); /* do not busy-wait the CSR */
12964 reg = read_csr(dd, RCV_STATUS);
12965 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
12966 break;
12967
12968 /* give up after 100us - slowest possible at 33MHz is 73us */
12969 if (count++ > 50) {
12970 dd_dev_err(dd,
12971 "%s: RcvStatus.RxRbufInit not set, continuing\n",
12972 __func__);
12973 break;
12974 }
12975 }
12976}
12977
12978/* set RXE CSRs to chip reset defaults */
12979static void reset_rxe_csrs(struct hfi1_devdata *dd)
12980{
12981 int i, j;
12982
12983 /*
12984 * RXE Kernel CSRs
12985 */
12986 write_csr(dd, RCV_CTRL, 0);
12987 init_rbufs(dd);
12988 /* RCV_STATUS read-only */
12989 /* RCV_CONTEXTS read-only */
12990 /* RCV_ARRAY_CNT read-only */
12991 /* RCV_BUF_SIZE read-only */
12992 write_csr(dd, RCV_BTH_QP, 0);
12993 write_csr(dd, RCV_MULTICAST, 0);
12994 write_csr(dd, RCV_BYPASS, 0);
12995 write_csr(dd, RCV_VL15, 0);
12996 /* this is a clear-down */
12997 write_csr(dd, RCV_ERR_INFO,
12998 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
12999 /* RCV_ERR_STATUS read-only */
13000 write_csr(dd, RCV_ERR_MASK, 0);
13001 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13002 /* RCV_ERR_FORCE leave alone */
13003 for (i = 0; i < 32; i++)
13004 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13005 for (i = 0; i < 4; i++)
13006 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13007 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13008 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13009 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13010 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13011 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13012 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13013 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13014 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13015 }
13016 for (i = 0; i < 32; i++)
13017 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13018
13019 /*
13020 * RXE Kernel and User Per-Context CSRs
13021 */
13022 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13023 /* kernel */
13024 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13025 /* RCV_CTXT_STATUS read-only */
13026 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13027 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13028 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13029 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13030 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13031 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13032 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13033 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13034 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13035 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13036
13037 /* user */
13038 /* RCV_HDR_TAIL read-only */
13039 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13040 /* RCV_EGR_INDEX_TAIL read-only */
13041 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13042 /* RCV_EGR_OFFSET_TAIL read-only */
13043 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13044 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13045 0);
13046 }
13047 }
13048}
13049
13050/*
13051 * Set sc2vl tables.
13052 *
13053 * They power on to zeros, so to avoid send context errors
13054 * they need to be set:
13055 *
13056 * SC 0-7 -> VL 0-7 (respectively)
13057 * SC 15 -> VL 15
13058 * otherwise
13059 * -> VL 0
13060 */
13061static void init_sc2vl_tables(struct hfi1_devdata *dd)
13062{
13063 int i;
13064 /* init per architecture spec, constrained by hardware capability */
13065
13066 /* HFI maps sent packets */
13067 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13068 0,
13069 0, 0, 1, 1,
13070 2, 2, 3, 3,
13071 4, 4, 5, 5,
13072 6, 6, 7, 7));
13073 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13074 1,
13075 8, 0, 9, 0,
13076 10, 0, 11, 0,
13077 12, 0, 13, 0,
13078 14, 0, 15, 15));
13079 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13080 2,
13081 16, 0, 17, 0,
13082 18, 0, 19, 0,
13083 20, 0, 21, 0,
13084 22, 0, 23, 0));
13085 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13086 3,
13087 24, 0, 25, 0,
13088 26, 0, 27, 0,
13089 28, 0, 29, 0,
13090 30, 0, 31, 0));
13091
13092 /* DC maps received packets */
13093 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13094 15_0,
13095 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13096 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13097 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13098 31_16,
13099 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13100 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13101
13102 /* initialize the cached sc2vl values consistently with h/w */
13103 for (i = 0; i < 32; i++) {
13104 if (i < 8 || i == 15)
13105 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13106 else
13107 *((u8 *)(dd->sc2vl) + i) = 0;
13108 }
13109}
13110
13111/*
13112 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13113 * depend on the chip going through a power-on reset - a driver may be loaded
13114 * and unloaded many times.
13115 *
13116 * Do not write any CSR values to the chip in this routine - there may be
13117 * a reset following the (possible) FLR in this routine.
13118 *
13119 */
13120static void init_chip(struct hfi1_devdata *dd)
13121{
13122 int i;
13123
13124 /*
13125 * Put the HFI CSRs in a known state.
13126 * Combine this with a DC reset.
13127 *
13128 * Stop the device from doing anything while we do a
13129 * reset. We know there are no other active users of
13130 * the device since we are now in charge. Turn off
13131	 * all outbound and inbound traffic and make sure
13132 * the device does not generate any interrupts.
13133 */
13134
13135 /* disable send contexts and SDMA engines */
13136 write_csr(dd, SEND_CTRL, 0);
13137 for (i = 0; i < dd->chip_send_contexts; i++)
13138 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13139 for (i = 0; i < dd->chip_sdma_engines; i++)
13140 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13141 /* disable port (turn off RXE inbound traffic) and contexts */
13142 write_csr(dd, RCV_CTRL, 0);
13143 for (i = 0; i < dd->chip_rcv_contexts; i++)
13144		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13145 /* mask all interrupt sources */
13146 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13147 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13148
13149 /*
13150 * DC Reset: do a full DC reset before the register clear.
13151 * A recommended length of time to hold is one CSR read,
13152 * so reread the CceDcCtrl. Then, hold the DC in reset
13153 * across the clear.
13154 */
13155 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13156 (void) read_csr(dd, CCE_DC_CTRL);
13157
13158 if (use_flr) {
13159 /*
13160 * A FLR will reset the SPC core and part of the PCIe.
13161 * The parts that need to be restored have already been
13162 * saved.
13163 */
13164 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13165
13166 /* do the FLR, the DC reset will remain */
13167 hfi1_pcie_flr(dd);
13168
13169 /* restore command and BARs */
13170 restore_pci_variables(dd);
13171
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013172 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013173 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13174 hfi1_pcie_flr(dd);
13175 restore_pci_variables(dd);
13176 }
13177
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013178 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013179 } else {
13180 dd_dev_info(dd, "Resetting CSRs with writes\n");
13181 reset_cce_csrs(dd);
13182 reset_txe_csrs(dd);
13183 reset_rxe_csrs(dd);
13184 reset_asic_csrs(dd);
13185 reset_misc_csrs(dd);
13186 }
13187 /* clear the DC reset */
13188 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013189
Mike Marciniszyn77241052015-07-30 15:17:43 -040013190 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013191 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013192 setextled(dd, 0);
13193 /*
13194 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013195 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013196 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013197 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013198 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013199 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013200 * I2CCLK and I2CDAT will change per direction, and INT_N and
13201 * MODPRS_N are input only and their value is ignored.
13202 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013203 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13204 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013205}
13206
13207static void init_early_variables(struct hfi1_devdata *dd)
13208{
13209 int i;
13210
13211 /* assign link credit variables */
13212 dd->vau = CM_VAU;
13213 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013214 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013215 dd->link_credits--;
13216 dd->vcu = cu_to_vcu(hfi1_cu);
13217 /* enough room for 8 MAD packets plus header - 17K */
13218 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13219 if (dd->vl15_init > dd->link_credits)
13220 dd->vl15_init = dd->link_credits;
13221
13222 write_uninitialized_csrs_and_memories(dd);
13223
13224 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13225 for (i = 0; i < dd->num_pports; i++) {
13226 struct hfi1_pportdata *ppd = &dd->pport[i];
13227
13228 set_partition_keys(ppd);
13229 }
13230 init_sc2vl_tables(dd);
13231}
13232
13233static void init_kdeth_qp(struct hfi1_devdata *dd)
13234{
13235 /* user changed the KDETH_QP */
13236 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13237 /* out of range or illegal value */
13238 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13239 kdeth_qp = 0;
13240 }
13241 if (kdeth_qp == 0) /* not set, or failed range check */
13242 kdeth_qp = DEFAULT_KDETH_QP;
13243
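	/*
	 * Sketch of the intent (prefix value is an example only): the prefix
	 * written below is compared against the upper bits of the BTH
	 * destination QP, so a prefix of 0x80 marks QPNs 0x80xxxx as
	 * KDETH/expected-receive traffic.
	 */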
13244 write_csr(dd, SEND_BTH_QP,
13245 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13246 << SEND_BTH_QP_KDETH_QP_SHIFT);
13247
13248 write_csr(dd, RCV_BTH_QP,
13249 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13250 << RCV_BTH_QP_KDETH_QP_SHIFT);
13251}
13252
13253/**
13254 * init_qpmap_table
13255 * @dd - device data
13256 * @first_ctxt - first context
13257 * @last_ctxt - last context
13258 *
13259 * This routine sets the qpn mapping table that
13260 * is indexed by qpn[8:1].
13261 *
13262 * The routine will round robin the 256 settings
13263 * from first_ctxt to last_ctxt.
13264 *
13265 * The first/last looks ahead to having specialized
13266 * receive contexts for mgmt and bypass. Normal
13267 * verbs traffic is assumed to be on a range
13268 * of receive contexts.
13269 */
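/*
 * Each RCV_QP_MAP_TABLE CSR holds eight one-byte entries, which is why the
 * loop below flushes a register every eighth entry; e.g. round-robining
 * contexts 2..4 over the 256 slots yields the byte pattern 2,3,4,2,3,4,...
 */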
13270static void init_qpmap_table(struct hfi1_devdata *dd,
13271 u32 first_ctxt,
13272 u32 last_ctxt)
13273{
13274 u64 reg = 0;
13275 u64 regno = RCV_QP_MAP_TABLE;
13276 int i;
13277 u64 ctxt = first_ctxt;
13278
13279 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013280 reg |= ctxt << (8 * (i % 8));
13281 i++;
13282 ctxt++;
13283 if (ctxt > last_ctxt)
13284 ctxt = first_ctxt;
13285 if (i % 8 == 0) {
13286 write_csr(dd, regno, reg);
13287 reg = 0;
13288 regno += 8;
13289 }
13290 }
13291 if (i % 8)
13292 write_csr(dd, regno, reg);
13293
13294 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13295 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13296}
13297
13298/**
13299 * init_qos - init RX qos
13300 * @dd - device data
13301 * @first_ctxt - first context
13302 *
13303 * This routine initializes Rule 0 and the
13304 * RSM map table to implement qos.
13305 *
13306 * If all of the limit tests succeed,
13307 * qos is applied based on the array
13308 * interpretation of krcvqs where
13309 * entry 0 is VL0.
13310 *
13311 * The number of vl bits (n) and the number of qpn
13312 * bits (m) are computed to feed both the RSM map table
13313 * and the single rule.
13314 *
13315 */
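/*
 * Illustrative sizing: num_vls = 8 with a per-VL krcvqs maximum of 3 gives
 * qpns_per_vl = 4, so n = 3 VL bits and m = 2 QPN bits, and the rule indexes
 * a 2^(m+n) = 32 entry slice of the RSM map table.
 */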
13316static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13317{
13318 u8 max_by_vl = 0;
13319 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13320 u64 *rsmmap;
13321 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013322 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013323
13324 /* validate */
13325 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13326 num_vls == 1 ||
13327 krcvqsset <= 1)
13328 goto bail;
13329 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13330 if (krcvqs[i] > max_by_vl)
13331 max_by_vl = krcvqs[i];
13332 if (max_by_vl > 32)
13333 goto bail;
13334 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13335 /* determine bits vl */
13336 n = ilog2(num_vls);
13337 /* determine bits for qpn */
13338 m = ilog2(qpns_per_vl);
13339 if ((m + n) > 7)
13340 goto bail;
13341 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13342 goto bail;
13343	rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
	if (!rsmmap)
		goto bail;
13344	memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13345 /* init the local copy of the table */
13346 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13347 unsigned tctxt;
13348
13349 for (qpn = 0, tctxt = ctxt;
13350 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13351 unsigned idx, regoff, regidx;
13352
13353			/* generate an index < 128 */
13354 idx = (qpn << n) ^ i;
13355 regoff = (idx % 8) * 8;
13356 regidx = idx / 8;
13357 reg = rsmmap[regidx];
13358 /* replace 0xff with context number */
13359 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13360 << regoff);
13361 reg |= (u64)(tctxt++) << regoff;
13362 rsmmap[regidx] = reg;
13363 if (tctxt == ctxt + krcvqs[i])
13364 tctxt = ctxt;
13365 }
13366 ctxt += krcvqs[i];
13367 }
13368 /* flush cached copies to chip */
13369 for (i = 0; i < NUM_MAP_REGS; i++)
13370 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13371 /* add rule0 */
13372 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13373 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13374 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13375 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13376 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13377 LRH_BTH_MATCH_OFFSET
13378 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13379 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13380 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13381 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13382 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13383 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13384 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13385 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13386 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13387 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13388 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13389 /* Enable RSM */
13390 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13391 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013392 /* map everything else to first context */
13393 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013394 dd->qos_shift = n + 1;
13395 return;
13396bail:
13397 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013398 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013399}
13400
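/*
 * Worked example of the mapping built above (illustrative only; the
 * krcvqs values are made up and not defaults):
 *
 *	num_vls = 4, krcvqs = {3, 3, 2, 2}  =>  max_by_vl = 3
 *	qpns_per_vl = __roundup_pow_of_two(3) = 4
 *	n = ilog2(num_vls)     = 2	bits to encode the VL
 *	m = ilog2(qpns_per_vl) = 2	bits to spread QPNs within a VL
 *
 * For VL i and QPN qpn, the loop fills the 8-bit map table entry at
 *
 *	idx    = (qpn << n) ^ i;	always < 128 since m + n <= 7
 *	regidx = idx / 8;		8 entries per 64-bit CSR
 *	regoff = (idx % 8) * 8;
 *
 * with the receive contexts of VL i used round-robin, wrapping after
 * krcvqs[i] contexts.  dd->qos_shift is then set to n + 1 (or to 1 on
 * the bail path).
 */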
13401static void init_rxe(struct hfi1_devdata *dd)
13402{
13403 /* enable all receive errors */
13404 write_csr(dd, RCV_ERR_MASK, ~0ull);
13405 /* setup QPN map table - start where VL15 context leaves off */
13406 init_qos(
13407 dd,
13408 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13409 /*
13410 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13411 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13412 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13413 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13414 * Max_PayLoad_Size set to its minimum of 128.
13415	 * Max_Payload_Size set to its minimum of 128.
13416 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13417 * (64 bytes). Max_Payload_Size is possibly modified upward in
13418 * tune_pcie_caps() which is called after this routine.
13419 */
13420}
13421
13422static void init_other(struct hfi1_devdata *dd)
13423{
13424 /* enable all CCE errors */
13425 write_csr(dd, CCE_ERR_MASK, ~0ull);
13426 /* enable *some* Misc errors */
13427 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13428 /* enable all DC errors, except LCB */
13429 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13430 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13431}
13432
13433/*
13434 * Fill out the given AU table using the given CU. A CU is defined in terms
13435 * of AUs. The table is an encoding: given the index, how many AUs does that
13436 * represent?
13437 *
13438 * NOTE: Assumes that the register layout is the same for the
13439 * local and remote tables.
13440 */
13441static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13442 u32 csr0to3, u32 csr4to7)
13443{
13444 write_csr(dd, csr0to3,
13445 0ull <<
13446 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13447 | 1ull <<
13448 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13449 | 2ull * cu <<
13450 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13451 | 4ull * cu <<
13452 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13453 write_csr(dd, csr4to7,
13454 8ull * cu <<
13455 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13456 | 16ull * cu <<
13457 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13458 | 32ull * cu <<
13459 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13460 | 64ull * cu <<
13461 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13462
13463}
13464
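/*
 * Example of the resulting encoding (illustrative; cu = 2 is an assumed
 * value used only to make the arithmetic concrete):
 *
 *	index:	0    1    2     3     4     5      6      7
 *	AUs:	0    1    2*cu  4*cu  8*cu  16*cu  32*cu  64*cu
 *	cu=2:	0    1    4     8     16    32     64     128
 *
 * Entries 0 and 1 are fixed; entries 2 through 7 scale with the CU.
 */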
13465static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13466{
13467 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13468 SEND_CM_LOCAL_AU_TABLE4_TO7);
13469}
13470
13471void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13472{
13473 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13474 SEND_CM_REMOTE_AU_TABLE4_TO7);
13475}
13476
13477static void init_txe(struct hfi1_devdata *dd)
13478{
13479 int i;
13480
13481 /* enable all PIO, SDMA, general, and Egress errors */
13482 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13483 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13484 write_csr(dd, SEND_ERR_MASK, ~0ull);
13485 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13486
13487 /* enable all per-context and per-SDMA engine errors */
13488 for (i = 0; i < dd->chip_send_contexts; i++)
13489 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13490 for (i = 0; i < dd->chip_sdma_engines; i++)
13491 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13492
13493 /* set the local CU to AU mapping */
13494 assign_local_cm_au_table(dd, dd->vcu);
13495
13496 /*
13497 * Set reasonable default for Credit Return Timer
13498 * Don't set on Simulator - causes it to choke.
13499 */
13500 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13501 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13502}
13503
13504int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13505{
13506 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13507 unsigned sctxt;
13508 int ret = 0;
13509 u64 reg;
13510
13511 if (!rcd || !rcd->sc) {
13512 ret = -EINVAL;
13513 goto done;
13514 }
13515 sctxt = rcd->sc->hw_context;
13516 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13517 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13518 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13519 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13520 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13521 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13522 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13523 /*
13524 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13525 * (due to A0 erratum).
13526 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013527 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013528 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13529 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13530 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13531 }
13532
13533 /* Enable J_KEY check on receive context. */
13534 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13535 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13536 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13537 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13538done:
13539 return ret;
13540}
13541
13542int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13543{
13544 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13545 unsigned sctxt;
13546 int ret = 0;
13547 u64 reg;
13548
13549 if (!rcd || !rcd->sc) {
13550 ret = -EINVAL;
13551 goto done;
13552 }
13553 sctxt = rcd->sc->hw_context;
13554 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13555 /*
13556 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13557 * This check would not have been enabled for A0 h/w, see
13558 * set_ctxt_jkey().
13559 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013560 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013561 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13562 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13563 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13564 }
13565 /* Turn off the J_KEY on the receive side */
13566 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13567done:
13568 return ret;
13569}
13570
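/*
 * Usage sketch for the two J_KEY helpers above (illustrative; the
 * context number and key value are made up):
 *
 *	ret = hfi1_set_ctxt_jkey(dd, ctxt, 0x1234);
 *	if (ret)
 *		return ret;	context has no send context yet
 *	...
 *	hfi1_clear_ctxt_jkey(dd, ctxt);
 *
 * The set call programs the receive-side RcvKeyCtrl and, except on A0
 * hardware, the send-side J_KEY check; the clear call undoes both.
 */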
13571int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13572{
13573 struct hfi1_ctxtdata *rcd;
13574 unsigned sctxt;
13575 int ret = 0;
13576 u64 reg;
13577
13578 if (ctxt < dd->num_rcv_contexts)
13579 rcd = dd->rcd[ctxt];
13580 else {
13581 ret = -EINVAL;
13582 goto done;
13583 }
13584 if (!rcd || !rcd->sc) {
13585 ret = -EINVAL;
13586 goto done;
13587 }
13588 sctxt = rcd->sc->hw_context;
13589 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13590 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13591 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13592 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13593 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13594 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13595done:
13596 return ret;
13597}
13598
13599int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13600{
13601 struct hfi1_ctxtdata *rcd;
13602 unsigned sctxt;
13603 int ret = 0;
13604 u64 reg;
13605
13606 if (ctxt < dd->num_rcv_contexts)
13607 rcd = dd->rcd[ctxt];
13608 else {
13609 ret = -EINVAL;
13610 goto done;
13611 }
13612 if (!rcd || !rcd->sc) {
13613 ret = -EINVAL;
13614 goto done;
13615 }
13616 sctxt = rcd->sc->hw_context;
13617 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13618 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13619 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13620 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13621done:
13622 return ret;
13623}
13624
13625/*
13626 * Start doing the clean up of the chip. Our clean up happens in multiple
13627 * stages and this is just the first.
13628 */
13629void hfi1_start_cleanup(struct hfi1_devdata *dd)
13630{
13631 free_cntrs(dd);
13632 free_rcverr(dd);
13633 clean_up_interrupts(dd);
13634}
13635
13636#define HFI_BASE_GUID(dev) \
13637 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13638
13639/*
13640 * Certain chip functions need to be initialized only once per asic
13641 * instead of per-device. This function finds the peer device and
13642 * checks whether that chip initialization needs to be done by this
13643 * device.
13644 */
13645static void asic_should_init(struct hfi1_devdata *dd)
13646{
13647 unsigned long flags;
13648 struct hfi1_devdata *tmp, *peer = NULL;
13649
13650 spin_lock_irqsave(&hfi1_devs_lock, flags);
13651 /* Find our peer device */
13652 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13653 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13654 dd->unit != tmp->unit) {
13655 peer = tmp;
13656 break;
13657 }
13658 }
13659
13660 /*
13661 * "Claim" the ASIC for initialization if it hasn't been
13662	 * "claimed" yet.
13663 */
13664 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13665 dd->flags |= HFI1_DO_INIT_ASIC;
13666 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13667}
13668
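/*
 * Example of the peer match above (the GUID values are made up): the
 * two HFIs on one ASIC are expected to differ only in the bit at
 * GUID_HFI_INDEX_SHIFT, so base GUIDs of
 *
 *	0x0011750123456789
 *	0x0011750123456789 | (1ULL << GUID_HFI_INDEX_SHIFT)
 *
 * produce the same HFI_BASE_GUID() value and identify the devices as
 * peers.  The device probed second then sees the peer's
 * HFI1_DO_INIT_ASIC flag and does not claim the ASIC again.
 */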
Dean Luick5d9157a2015-11-16 21:59:34 -050013669/*
13670 * Set dd->boardname. Use a generic name if a name is not returned from
13671 * EFI variable space.
13672 *
13673 * Return 0 on success, -ENOMEM if space could not be allocated.
13674 */
13675static int obtain_boardname(struct hfi1_devdata *dd)
13676{
13677 /* generic board description */
13678 const char generic[] =
13679 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13680 unsigned long size;
13681 int ret;
13682
13683 ret = read_hfi1_efi_var(dd, "description", &size,
13684 (void **)&dd->boardname);
13685 if (ret) {
13686 dd_dev_err(dd, "Board description not found\n");
13687 /* use generic description */
13688 dd->boardname = kstrdup(generic, GFP_KERNEL);
13689 if (!dd->boardname)
13690 return -ENOMEM;
13691 }
13692 return 0;
13693}
13694
Mike Marciniszyn77241052015-07-30 15:17:43 -040013695/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013696 * hfi1_init_dd - allocate and initialize the device structure for the hfi
Mike Marciniszyn77241052015-07-30 15:17:43 -040013697 * @pdev: the pci_dev for hfi1_ib device
13698 * @ent: pci_device_id struct for this dev
13699 *
13700 * Also allocates, initializes, and returns the devdata struct for this
13701 * device instance
13702 *
13703 * This is global, and is called directly at init to set up the
13704 * chip-specific function pointers for later use.
13705 */
13706struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13707 const struct pci_device_id *ent)
13708{
13709 struct hfi1_devdata *dd;
13710 struct hfi1_pportdata *ppd;
13711 u64 reg;
13712 int i, ret;
13713 static const char * const inames[] = { /* implementation names */
13714 "RTL silicon",
13715 "RTL VCS simulation",
13716 "RTL FPGA emulation",
13717 "Functional simulator"
13718 };
13719
13720 dd = hfi1_alloc_devdata(pdev,
13721 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13722 if (IS_ERR(dd))
13723 goto bail;
13724 ppd = dd->pport;
13725 for (i = 0; i < dd->num_pports; i++, ppd++) {
13726 int vl;
13727 /* init common fields */
13728 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13729 /* DC supports 4 link widths */
13730 ppd->link_width_supported =
13731 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13732 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13733 ppd->link_width_downgrade_supported =
13734 ppd->link_width_supported;
13735 /* start out enabling only 4X */
13736 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13737 ppd->link_width_downgrade_enabled =
13738 ppd->link_width_downgrade_supported;
13739 /* link width active is 0 when link is down */
13740 /* link width downgrade active is 0 when link is down */
13741
13742 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13743 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13744 hfi1_early_err(&pdev->dev,
13745 "Invalid num_vls %u, using %u VLs\n",
13746 num_vls, HFI1_MAX_VLS_SUPPORTED);
13747 num_vls = HFI1_MAX_VLS_SUPPORTED;
13748 }
13749 ppd->vls_supported = num_vls;
13750 ppd->vls_operational = ppd->vls_supported;
13751 /* Set the default MTU. */
13752 for (vl = 0; vl < num_vls; vl++)
13753 dd->vld[vl].mtu = hfi1_max_mtu;
13754 dd->vld[15].mtu = MAX_MAD_PACKET;
13755 /*
13756 * Set the initial values to reasonable default, will be set
13757 * for real when link is up.
13758 */
13759 ppd->lstate = IB_PORT_DOWN;
13760 ppd->overrun_threshold = 0x4;
13761 ppd->phy_error_threshold = 0xf;
13762 ppd->port_crc_mode_enabled = link_crc_mask;
13763 /* initialize supported LTP CRC mode */
13764 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13765 /* initialize enabled LTP CRC mode */
13766 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13767 /* start in offline */
13768 ppd->host_link_state = HLS_DN_OFFLINE;
13769 init_vl_arb_caches(ppd);
13770 }
13771
13772 dd->link_default = HLS_DN_POLL;
13773
13774 /*
13775 * Do remaining PCIe setup and save PCIe values in dd.
13776 * Any error printing is already done by the init code.
13777 * On return, we have the chip mapped.
13778 */
13779 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13780 if (ret < 0)
13781 goto bail_free;
13782
13783 /* verify that reads actually work, save revision for reset check */
13784 dd->revision = read_csr(dd, CCE_REVISION);
13785 if (dd->revision == ~(u64)0) {
13786 dd_dev_err(dd, "cannot read chip CSRs\n");
13787 ret = -EINVAL;
13788 goto bail_cleanup;
13789 }
13790 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13791 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13792 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13793 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13794
13795 /* obtain the hardware ID - NOT related to unit, which is a
13796 software enumeration */
13797 reg = read_csr(dd, CCE_REVISION2);
13798 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13799 & CCE_REVISION2_HFI_ID_MASK;
13800 /* the variable size will remove unwanted bits */
13801 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13802 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13803 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13804 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13805 (int)dd->irev);
13806
13807 /* speeds the hardware can support */
13808 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13809 /* speeds allowed to run at */
13810 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13811 /* give a reasonable active value, will be set on link up */
13812 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13813
13814 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13815 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13816 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13817 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13818 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13819 /* fix up link widths for emulation _p */
13820 ppd = dd->pport;
13821 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13822 ppd->link_width_supported =
13823 ppd->link_width_enabled =
13824 ppd->link_width_downgrade_supported =
13825 ppd->link_width_downgrade_enabled =
13826 OPA_LINK_WIDTH_1X;
13827 }
13828	/* ensure num_vls isn't larger than number of sdma engines */
13829 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13830 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050013831 num_vls, dd->chip_sdma_engines);
13832 num_vls = dd->chip_sdma_engines;
13833 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013834 }
13835
13836 /*
13837 * Convert the ns parameter to the 64 * cclocks used in the CSR.
13838 * Limit the max if larger than the field holds. If timeout is
13839 * non-zero, then the calculated field will be at least 1.
13840 *
13841 * Must be after icode is set up - the cclock rate depends
13842 * on knowing the hardware being used.
13843 */
13844 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13845 if (dd->rcv_intr_timeout_csr >
13846 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13847 dd->rcv_intr_timeout_csr =
13848 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13849 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13850 dd->rcv_intr_timeout_csr = 1;
13851
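	/*
	 * Worked example of the conversion above (illustrative; a 1 ns
	 * cclock period is assumed purely to keep the arithmetic simple,
	 * the real rate depends on the hardware): a timeout of 840 ns
	 * becomes 840 cclocks, and 840 / 64 = 13 is written to the CSR
	 * field.  A small non-zero timeout that would truncate to 0 is
	 * bumped to 1.
	 */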
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013852 /* needs to be done before we look for the peer device */
13853 read_guid(dd);
13854
13855 /* should this device init the ASIC block? */
13856 asic_should_init(dd);
13857
Mike Marciniszyn77241052015-07-30 15:17:43 -040013858 /* obtain chip sizes, reset chip CSRs */
13859 init_chip(dd);
13860
13861 /* read in the PCIe link speed information */
13862 ret = pcie_speeds(dd);
13863 if (ret)
13864 goto bail_cleanup;
13865
Mike Marciniszyn77241052015-07-30 15:17:43 -040013866 /* read in firmware */
13867 ret = hfi1_firmware_init(dd);
13868 if (ret)
13869 goto bail_cleanup;
13870
13871 /*
13872 * In general, the PCIe Gen3 transition must occur after the
13873 * chip has been idled (so it won't initiate any PCIe transactions
13874 * e.g. an interrupt) and before the driver changes any registers
13875 * (the transition will reset the registers).
13876 *
13877 * In particular, place this call after:
13878 * - init_chip() - the chip will not initiate any PCIe transactions
13879 * - pcie_speeds() - reads the current link speed
13880 * - hfi1_firmware_init() - the needed firmware is ready to be
13881 * downloaded
13882 */
13883 ret = do_pcie_gen3_transition(dd);
13884 if (ret)
13885 goto bail_cleanup;
13886
13887 /* start setting dd values and adjusting CSRs */
13888 init_early_variables(dd);
13889
13890 parse_platform_config(dd);
13891
Dean Luick5d9157a2015-11-16 21:59:34 -050013892 ret = obtain_boardname(dd);
13893 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013894 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013895
13896 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050013897 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013898 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040013899 (u32)dd->majrev,
13900 (u32)dd->minrev,
13901 (dd->revision >> CCE_REVISION_SW_SHIFT)
13902 & CCE_REVISION_SW_MASK);
13903
13904 ret = set_up_context_variables(dd);
13905 if (ret)
13906 goto bail_cleanup;
13907
13908 /* set initial RXE CSRs */
13909 init_rxe(dd);
13910 /* set initial TXE CSRs */
13911 init_txe(dd);
13912 /* set initial non-RXE, non-TXE CSRs */
13913 init_other(dd);
13914 /* set up KDETH QP prefix in both RX and TX CSRs */
13915 init_kdeth_qp(dd);
13916
13917 /* send contexts must be set up before receive contexts */
13918 ret = init_send_contexts(dd);
13919 if (ret)
13920 goto bail_cleanup;
13921
13922 ret = hfi1_create_ctxts(dd);
13923 if (ret)
13924 goto bail_cleanup;
13925
13926 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
13927 /*
13928 * rcd[0] is guaranteed to be valid by this point. Also, all
13929 * context are using the same value, as per the module parameter.
13930	 * contexts are using the same value, as per the module parameter.
13931 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
13932
13933 ret = init_pervl_scs(dd);
13934 if (ret)
13935 goto bail_cleanup;
13936
13937 /* sdma init */
13938 for (i = 0; i < dd->num_pports; ++i) {
13939 ret = sdma_init(dd, i);
13940 if (ret)
13941 goto bail_cleanup;
13942 }
13943
13944 /* use contexts created by hfi1_create_ctxts */
13945 ret = set_up_interrupts(dd);
13946 if (ret)
13947 goto bail_cleanup;
13948
13949 /* set up LCB access - must be after set_up_interrupts() */
13950 init_lcb_access(dd);
13951
13952 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
13953 dd->base_guid & 0xFFFFFF);
13954
13955 dd->oui1 = dd->base_guid >> 56 & 0xFF;
13956 dd->oui2 = dd->base_guid >> 48 & 0xFF;
13957 dd->oui3 = dd->base_guid >> 40 & 0xFF;
13958
13959 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
13960 if (ret)
13961 goto bail_clear_intr;
13962 check_fabric_firmware_versions(dd);
13963
13964 thermal_init(dd);
13965
13966 ret = init_cntrs(dd);
13967 if (ret)
13968 goto bail_clear_intr;
13969
13970 ret = init_rcverr(dd);
13971 if (ret)
13972 goto bail_free_cntrs;
13973
13974 ret = eprom_init(dd);
13975 if (ret)
13976 goto bail_free_rcverr;
13977
13978 goto bail;
13979
13980bail_free_rcverr:
13981 free_rcverr(dd);
13982bail_free_cntrs:
13983 free_cntrs(dd);
13984bail_clear_intr:
13985 clean_up_interrupts(dd);
13986bail_cleanup:
13987 hfi1_pcie_ddcleanup(dd);
13988bail_free:
13989 hfi1_free_devdata(dd);
13990 dd = ERR_PTR(ret);
13991bail:
13992 return dd;
13993}
13994
13995static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
13996 u32 dw_len)
13997{
13998 u32 delta_cycles;
13999 u32 current_egress_rate = ppd->current_egress_rate;
14000 /* rates here are in units of 10^6 bits/sec */
14001
14002 if (desired_egress_rate == -1)
14003 return 0; /* shouldn't happen */
14004
14005 if (desired_egress_rate >= current_egress_rate)
14006 return 0; /* we can't help go faster, only slower */
14007
14008 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14009 egress_cycles(dw_len * 4, current_egress_rate);
14010
14011 return (u16)delta_cycles;
14012}
14013
14014
14015/**
14016 * create_pbc - build a pbc for transmission
14017 * @flags: special case flags or-ed in built pbc
14018 * @srate: static rate
14019 * @srate_mbs: static rate, in Mbit/s
14020 * @vl: vl
14021 * @dw_len: dword length (header words + data words + pbc words)
14022 * Create a PBC with the given flags, rate, VL, and length.
14023 *
14024 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14025 * for verbs, which does not use this PSM feature. The lone other caller
14026 * is for the diagnostic interface which calls this if the user does not
14027 * supply their own PBC.
14028 */
14029u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14030 u32 dw_len)
14031{
14032 u64 pbc, delay = 0;
14033
14034 if (unlikely(srate_mbs))
14035 delay = delay_cycles(ppd, srate_mbs, dw_len);
14036
14037 pbc = flags
14038 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14039 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14040 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14041 | (dw_len & PBC_LENGTH_DWS_MASK)
14042 << PBC_LENGTH_DWS_SHIFT;
14043
14044 return pbc;
14045}
14046
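/*
 * Usage sketch (illustrative; the variables are assumed to exist at the
 * call site):
 *
 *	u32 dwords = hdr_dwords + data_dwords + 2;	2 dwords of PBC
 *	u64 pbc = create_pbc(ppd, 0, 0, vl, dwords);	no flags, no rate
 *
 * A zero srate_mbs skips the static rate delay entirely; a non-zero
 * rate can only slow egress down, since delay_cycles() returns 0 when
 * the requested rate is at or above the current egress rate.
 */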
14047#define SBUS_THERMAL 0x4f
14048#define SBUS_THERM_MONITOR_MODE 0x1
14049
14050#define THERM_FAILURE(dev, ret, reason) \
14051 dd_dev_err((dd), \
14052 "Thermal sensor initialization failed: %s (%d)\n", \
14053 (reason), (ret))
14054
14055/*
14056 * Initialize the Avago Thermal sensor.
14057 *
14058 * After initialization, enable polling of thermal sensor through
14059 * the SBus interface. For this to work, the SBus Master
14060 * firmware has to be loaded first, because the HW polling
14061 * logic uses SBus interrupts, which are not supported by the
14062 * default firmware. Otherwise, no data will be returned through
14063 * the ASIC_STS_THERM CSR.
14064 */
14065static int thermal_init(struct hfi1_devdata *dd)
14066{
14067 int ret = 0;
14068
14069 if (dd->icode != ICODE_RTL_SILICON ||
14070 !(dd->flags & HFI1_DO_INIT_ASIC))
14071 return ret;
14072
14073 acquire_hw_mutex(dd);
14074 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014075 /* Disable polling of thermal readings */
14076 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14077 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014078 /* Thermal Sensor Initialization */
14079 /* Step 1: Reset the Thermal SBus Receiver */
14080 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14081 RESET_SBUS_RECEIVER, 0);
14082 if (ret) {
14083 THERM_FAILURE(dd, ret, "Bus Reset");
14084 goto done;
14085 }
14086 /* Step 2: Set Reset bit in Thermal block */
14087 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14088 WRITE_SBUS_RECEIVER, 0x1);
14089 if (ret) {
14090 THERM_FAILURE(dd, ret, "Therm Block Reset");
14091 goto done;
14092 }
14093 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14094 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14095 WRITE_SBUS_RECEIVER, 0x32);
14096 if (ret) {
14097 THERM_FAILURE(dd, ret, "Write Clock Div");
14098 goto done;
14099 }
14100 /* Step 4: Select temperature mode */
14101 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14102 WRITE_SBUS_RECEIVER,
14103 SBUS_THERM_MONITOR_MODE);
14104 if (ret) {
14105 THERM_FAILURE(dd, ret, "Write Mode Sel");
14106 goto done;
14107 }
14108 /* Step 5: De-assert block reset and start conversion */
14109 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14110 WRITE_SBUS_RECEIVER, 0x2);
14111 if (ret) {
14112 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14113 goto done;
14114 }
14115 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14116 msleep(22);
14117
14118 /* Enable polling of thermal readings */
14119 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14120done:
14121 release_hw_mutex(dd);
14122 return ret;
14123}
14124
14125static void handle_temp_err(struct hfi1_devdata *dd)
14126{
14127 struct hfi1_pportdata *ppd = &dd->pport[0];
14128 /*
14129 * Thermal Critical Interrupt
14130 * Put the device into forced freeze mode, take link down to
14131 * offline, and put DC into reset.
14132 */
14133 dd_dev_emerg(dd,
14134 "Critical temperature reached! Forcing device into freeze mode!\n");
14135 dd->flags |= HFI1_FORCED_FREEZE;
14136 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14137 /*
14138 * Shut DC down as much and as quickly as possible.
14139 *
14140 * Step 1: Take the link down to OFFLINE. This will cause the
14141 * 8051 to put the Serdes in reset. However, we don't want to
14142 * go through the entire link state machine since we want to
14143 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14144 * but rather an attempt to save the chip.
14145 * Code below is almost the same as quiet_serdes() but avoids
14146 * all the extra work and the sleeps.
14147 */
14148 ppd->driver_link_ready = 0;
14149 ppd->link_enabled = 0;
14150 set_physical_link_state(dd, PLS_OFFLINE |
14151 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14152 /*
14153 * Step 2: Shutdown LCB and 8051
14154 * After shutdown, do not restore DC_CFG_RESET value.
14155 */
14156 dc_shutdown(dd);
14157}