 1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
 66#include "efivar.h"
 67
68#define NUM_IB_PORTS 1
69
70uint kdeth_qp;
71module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75module_param(num_vls, uint, S_IRUGO);
76MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78/*
79 * Default time to aggregate two 10K packets from the idle state
80 * (timer not running). The timer starts at the end of the first packet,
81 * so only the time for one 10K packet and header plus a bit extra is needed.
 82 * 10 * 1024 + 64 header bytes = 10304 bytes
 83 * 10304 bytes / 12.5 GB/s = 824.32 ns
84 */
85uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86module_param(rcv_intr_timeout, uint, S_IRUGO);
87MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89uint rcv_intr_count = 16; /* same as qib */
90module_param(rcv_intr_count, uint, S_IRUGO);
91MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93ushort link_crc_mask = SUPPORTED_CRCS;
94module_param(link_crc_mask, ushort, S_IRUGO);
95MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97uint loopback;
98module_param_named(loopback, loopback, uint, S_IRUGO);
 99MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101/* Other driver tunables */
102uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation*/
103static ushort crc_14b_sideband = 1;
104static uint use_flr = 1;
105uint quick_linkup; /* skip LNI */
106
107struct flag_table {
108 u64 flag; /* the flag */
109 char *str; /* description string */
110 u16 extra; /* extra information */
111 u16 unused0;
112 u32 unused1;
113};
114
115/* str must be a string constant */
116#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117#define FLAG_ENTRY0(str, flag) {flag, str, 0}
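/*
 * For illustration: FLAG_ENTRY0("CceCsrParityErr",
 * CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK) expands to the struct flag_table
 * initializer { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 },
 * i.e. .flag, .str, .extra in that order.  Tables built with FLAG_ENTRY()
 * (the PIO and send context tables below) pass SEC_* consequence bits as
 * the extra field.
 */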
118
119/* Send Error Consequences */
120#define SEC_WRITE_DROPPED 0x1
121#define SEC_PACKET_DROPPED 0x2
122#define SEC_SC_HALTED 0x4 /* per-context only */
123#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
124
 125#define MIN_KERNEL_KCTXTS 2
 126#define FIRST_KERNEL_KCTXT 1
 127#define NUM_MAP_REGS 32
128
129/* Bit offset into the GUID which carries HFI id information */
130#define GUID_HFI_INDEX_SHIFT 39
131
132/* extract the emulation revision */
133#define emulator_rev(dd) ((dd)->irev >> 8)
134/* parallel and serial emulation versions are 3 and 4 respectively */
135#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
136#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
137
138/* RSM fields */
139
140/* packet type */
141#define IB_PACKET_TYPE 2ull
142#define QW_SHIFT 6ull
143/* QPN[7..1] */
144#define QPN_WIDTH 7ull
145
146/* LRH.BTH: QW 0, OFFSET 48 - for match */
147#define LRH_BTH_QW 0ull
148#define LRH_BTH_BIT_OFFSET 48ull
149#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
150#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
151#define LRH_BTH_SELECT
152#define LRH_BTH_MASK 3ull
153#define LRH_BTH_VALUE 2ull
154
155/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
156#define LRH_SC_QW 0ull
157#define LRH_SC_BIT_OFFSET 56ull
158#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
159#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
160#define LRH_SC_MASK 128ull
161#define LRH_SC_VALUE 0ull
162
163/* SC[n..0] QW 0, OFFSET 60 - for select */
164#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
165
166/* QPN[m+n:1] QW 1, OFFSET 1 */
167#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
168
169/* defines to build power on SC2VL table */
170#define SC2VL_VAL( \
171 num, \
172 sc0, sc0val, \
173 sc1, sc1val, \
174 sc2, sc2val, \
175 sc3, sc3val, \
176 sc4, sc4val, \
177 sc5, sc5val, \
178 sc6, sc6val, \
179 sc7, sc7val) \
180( \
181 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
182 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
183 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
184 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
185 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
186 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
187 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
188 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
189)
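/*
 * For illustration: each scNval argument above is shifted into that SC's
 * field of the SendSC2VLT<num> CSR, so one SC2VL_VAL() invocation packs
 * eight SC-to-VL mappings into a single 64-bit register value.
 */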
190
191#define DC_SC_VL_VAL( \
192 range, \
193 e0, e0val, \
194 e1, e1val, \
195 e2, e2val, \
196 e3, e3val, \
197 e4, e4val, \
198 e5, e5val, \
199 e6, e6val, \
200 e7, e7val, \
201 e8, e8val, \
202 e9, e9val, \
203 e10, e10val, \
204 e11, e11val, \
205 e12, e12val, \
206 e13, e13val, \
207 e14, e14val, \
208 e15, e15val) \
209( \
210 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
211 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
212 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
213 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
214 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
215 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
216 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
217 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
218 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
219 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
220 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
221 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
222 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
223 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
224 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
225 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
226)
227
228/* all CceStatus sub-block freeze bits */
229#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
230 | CCE_STATUS_RXE_FROZE_SMASK \
231 | CCE_STATUS_TXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
233/* all CceStatus sub-block TXE pause bits */
234#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
235 | CCE_STATUS_TXE_PAUSED_SMASK \
236 | CCE_STATUS_SDMA_PAUSED_SMASK)
237/* all CceStatus sub-block RXE pause bits */
238#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
239
240/*
241 * CCE Error flags.
242 */
243static struct flag_table cce_err_status_flags[] = {
244/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
245 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
246/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
247 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
248/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
249 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
250/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
251 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
252/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
253 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
254/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
255 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
256/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
257 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
258/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
259 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
260/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
261 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
262/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
263 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
 264/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
265 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
266/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
267 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
268/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
270/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
271 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
 272/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
273 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
274/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
275 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
 276/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
277 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
 278/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
279 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
280/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
281 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
282/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
283 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
284/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
285 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
286/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
287 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
288/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
289 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
290/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
291 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
292/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
293 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
294/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
295 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
296/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
297 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
298/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
299 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
300/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
301 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
302/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
303 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
304/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
305 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
306/*31*/ FLAG_ENTRY0("LATriggered",
307 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
308/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
309 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
310/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
311 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
312/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
313 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
314/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
315 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
316/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
317 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
318/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
319 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
320/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
321 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
322/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
323 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
324/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
325 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
326/*41-63 reserved*/
327};
328
329/*
330 * Misc Error flags
331 */
332#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
333static struct flag_table misc_err_status_flags[] = {
334/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
335/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
336/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
337/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
338/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
339/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
340/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
341/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
342/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
343/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
344/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
345/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
346/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
347};
348
349/*
350 * TXE PIO Error flags and consequences
351 */
352static struct flag_table pio_err_status_flags[] = {
353/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
354 SEC_WRITE_DROPPED,
355 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
356/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
357 SEC_SPC_FREEZE,
358 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
359/* 2*/ FLAG_ENTRY("PioCsrParity",
360 SEC_SPC_FREEZE,
361 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
362/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
363 SEC_SPC_FREEZE,
364 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
365/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
366 SEC_SPC_FREEZE,
367 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
368/* 5*/ FLAG_ENTRY("PioPccFifoParity",
369 SEC_SPC_FREEZE,
370 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
371/* 6*/ FLAG_ENTRY("PioPecFifoParity",
372 SEC_SPC_FREEZE,
373 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
374/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
375 SEC_SPC_FREEZE,
376 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
377/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
378 SEC_SPC_FREEZE,
379 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
380/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
381 SEC_SPC_FREEZE,
382 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
383/*10*/ FLAG_ENTRY("PioSmPktResetParity",
384 SEC_SPC_FREEZE,
385 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
386/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
387 SEC_SPC_FREEZE,
388 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
389/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
390 SEC_SPC_FREEZE,
391 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
392/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
393 0,
394 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
395/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
396 0,
397 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
398/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
399 SEC_SPC_FREEZE,
400 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
401/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
402 SEC_SPC_FREEZE,
403 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
404/*17*/ FLAG_ENTRY("PioInitSmIn",
405 0,
406 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
407/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
408 SEC_SPC_FREEZE,
409 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
410/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
411 SEC_SPC_FREEZE,
412 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
413/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
414 0,
415 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
416/*21*/ FLAG_ENTRY("PioWriteDataParity",
417 SEC_SPC_FREEZE,
418 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
419/*22*/ FLAG_ENTRY("PioStateMachine",
420 SEC_SPC_FREEZE,
421 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
422/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
423 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
424 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
425/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
426 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
427 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
428/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
429 SEC_SPC_FREEZE,
430 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
431/*26*/ FLAG_ENTRY("PioVlfSopParity",
432 SEC_SPC_FREEZE,
433 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
434/*27*/ FLAG_ENTRY("PioVlFifoParity",
435 SEC_SPC_FREEZE,
436 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
437/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
438 SEC_SPC_FREEZE,
439 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
440/*29*/ FLAG_ENTRY("PioPpmcSopLen",
441 SEC_SPC_FREEZE,
442 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
443/*30-31 reserved*/
444/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
445 SEC_SPC_FREEZE,
446 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
447/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
448 SEC_SPC_FREEZE,
449 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
450/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
451 SEC_SPC_FREEZE,
452 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
453/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
454 SEC_SPC_FREEZE,
455 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
456/*36-63 reserved*/
457};
458
459/* TXE PIO errors that cause an SPC freeze */
460#define ALL_PIO_FREEZE_ERR \
461 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
462 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
490
491/*
492 * TXE SDMA Error flags
493 */
494static struct flag_table sdma_err_status_flags[] = {
495/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
496 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
497/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
498 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
499/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
500 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
501/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
502 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
503/*04-63 reserved*/
504};
505
506/* TXE SDMA errors that cause an SPC freeze */
507#define ALL_SDMA_FREEZE_ERR \
508 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
509 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
511
512/*
513 * TXE Egress Error flags
514 */
515#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
516static struct flag_table egress_err_status_flags[] = {
517/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
518/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
519/* 2 reserved */
520/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
521 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
522/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
523/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
524/* 6 reserved */
525/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
526 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
527/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
528 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
529/* 9-10 reserved */
530/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
531 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
532/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
533/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
534/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
535/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
536/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
537 SEES(TX_SDMA0_DISALLOWED_PACKET)),
538/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
539 SEES(TX_SDMA1_DISALLOWED_PACKET)),
540/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
541 SEES(TX_SDMA2_DISALLOWED_PACKET)),
542/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
543 SEES(TX_SDMA3_DISALLOWED_PACKET)),
544/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
545 SEES(TX_SDMA4_DISALLOWED_PACKET)),
546/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
547 SEES(TX_SDMA5_DISALLOWED_PACKET)),
548/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
549 SEES(TX_SDMA6_DISALLOWED_PACKET)),
550/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
551 SEES(TX_SDMA7_DISALLOWED_PACKET)),
552/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
553 SEES(TX_SDMA8_DISALLOWED_PACKET)),
554/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
555 SEES(TX_SDMA9_DISALLOWED_PACKET)),
556/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
557 SEES(TX_SDMA10_DISALLOWED_PACKET)),
558/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
559 SEES(TX_SDMA11_DISALLOWED_PACKET)),
560/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
561 SEES(TX_SDMA12_DISALLOWED_PACKET)),
562/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
563 SEES(TX_SDMA13_DISALLOWED_PACKET)),
564/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
565 SEES(TX_SDMA14_DISALLOWED_PACKET)),
566/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
567 SEES(TX_SDMA15_DISALLOWED_PACKET)),
568/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
569 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
570/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
571 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
572/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
573 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
574/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
575 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
576/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
577 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
578/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
579 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
580/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
582/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
584/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
586/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
587/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
588/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
589/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
590/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
591/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
592/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
593/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
594/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
595/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
596/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
597/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
598/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
599/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
600/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
601/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
602/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
603/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
604/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
605/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
606/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
607/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
608 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
609/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
610 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
611};
612
613/*
614 * TXE Egress Error Info flags
615 */
616#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
617static struct flag_table egress_err_info_flags[] = {
618/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
619/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
620/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
621/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
622/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
623/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
624/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
625/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
626/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
627/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
628/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
629/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
630/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
631/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
632/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
633/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
634/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
635/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
636/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
637/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
638/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
639/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
640};
641
642/* TXE Egress errors that cause an SPC freeze */
643#define ALL_TXE_EGRESS_FREEZE_ERR \
644 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
645 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
646 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
647 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
648 | SEES(TX_LAUNCH_CSR_PARITY) \
649 | SEES(TX_SBRD_CTL_CSR_PARITY) \
650 | SEES(TX_CONFIG_PARITY) \
651 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
652 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
653 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
654 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
655 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
656 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
657 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
658 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
660 | SEES(TX_CREDIT_RETURN_PARITY))
661
662/*
663 * TXE Send error flags
664 */
665#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
666static struct flag_table send_err_status_flags[] = {
 667/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 668/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
669/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
670};
671
672/*
673 * TXE Send Context Error flags and consequences
674 */
675static struct flag_table sc_err_status_flags[] = {
676/* 0*/ FLAG_ENTRY("InconsistentSop",
677 SEC_PACKET_DROPPED | SEC_SC_HALTED,
678 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
679/* 1*/ FLAG_ENTRY("DisallowedPacket",
680 SEC_PACKET_DROPPED | SEC_SC_HALTED,
681 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
682/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
683 SEC_WRITE_DROPPED | SEC_SC_HALTED,
684 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
685/* 3*/ FLAG_ENTRY("WriteOverflow",
686 SEC_WRITE_DROPPED | SEC_SC_HALTED,
687 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
688/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
689 SEC_WRITE_DROPPED | SEC_SC_HALTED,
690 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
691/* 5-63 reserved*/
692};
693
694/*
695 * RXE Receive Error flags
696 */
697#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
698static struct flag_table rxe_err_status_flags[] = {
699/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
700/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
701/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
702/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
703/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
704/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
705/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
706/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
707/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
708/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
709/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
710/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
711/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
712/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
713/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
714/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
715/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
716 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
717/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
718/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
719/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
720 RXES(RBUF_BLOCK_LIST_READ_UNC)),
721/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
722 RXES(RBUF_BLOCK_LIST_READ_COR)),
723/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
724 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
725/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
726 RXES(RBUF_CSR_QENT_CNT_PARITY)),
727/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
728 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
729/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
730 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
731/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
732/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
733/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
734 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
735/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
736/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
737/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
738/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
739/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
740/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
741/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
742/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
743 RXES(RBUF_FL_INITDONE_PARITY)),
744/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
745 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
746/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
747/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
748/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
749/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
750 RXES(LOOKUP_DES_PART1_UNC_COR)),
751/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
752 RXES(LOOKUP_DES_PART2_PARITY)),
753/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
754/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
755/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
756/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
757/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
758/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
759/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
760/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
761/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
762/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
763/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
764/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
765/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
766/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
767/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
768/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
769/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
770/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
771/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
772/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
773/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
774/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
775};
776
777/* RXE errors that will trigger an SPC freeze */
778#define ALL_RXE_FREEZE_ERR \
779 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
780 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
781 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
782 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
783 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
784 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
785 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
786 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
823
824#define RXE_FREEZE_ABORT_MASK \
825 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
826 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
827 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
828
829/*
830 * DCC Error Flags
831 */
832#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
833static struct flag_table dcc_err_flags[] = {
834 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
835 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
836 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
837 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
838 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
839 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
840 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
841 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
842 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
843 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
844 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
845 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
846 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
847 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
848 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
849 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
850 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
851 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
852 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
853 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
854 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
855 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
856 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
857 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
858 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
859 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
860 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
861 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
862 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
863 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
864 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
865 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
866 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
867 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
868 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
869 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
870 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
871 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
872 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
873 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
874 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
875 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
876 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
877 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
878 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
879 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
880};
881
882/*
883 * LCB error flags
884 */
885#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
886static struct flag_table lcb_err_flags[] = {
887/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
888/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
889/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
890/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
891 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
892/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
893/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
894/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
895/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
896/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
897/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
898/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
899/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
900/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
901/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
902 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
903/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
904/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
905/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
906/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
907/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
908/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
909 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
910/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
911/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
912/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
913/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
914/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
915/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
916/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
917 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
918/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
919/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
920 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
921/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
922 LCBE(REDUNDANT_FLIT_PARITY_ERR))
923};
924
925/*
926 * DC8051 Error Flags
927 */
928#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
929static struct flag_table dc8051_err_flags[] = {
930 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
931 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
932 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
933 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
934 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
935 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
936 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
937 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
938 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
939 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
940 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
941};
942
943/*
944 * DC8051 Information Error flags
945 *
946 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
947 */
948static struct flag_table dc8051_info_err_flags[] = {
949 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
950 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
951 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
952 FLAG_ENTRY0("Serdes internal loopback failure",
953 FAILED_SERDES_INTERNAL_LOOPBACK),
954 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
955 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
956 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
957 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
958 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
959 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
960 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
961 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
962};
963
964/*
965 * DC8051 Information Host Information flags
966 *
967 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
968 */
969static struct flag_table dc8051_info_host_msg_flags[] = {
970 FLAG_ENTRY0("Host request done", 0x0001),
971 FLAG_ENTRY0("BC SMA message", 0x0002),
972 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
973 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
974 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
975 FLAG_ENTRY0("External device config request", 0x0020),
976 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
977 FLAG_ENTRY0("LinkUp achieved", 0x0080),
978 FLAG_ENTRY0("Link going down", 0x0100),
979};
980
981
982static u32 encoded_size(u32 size);
983static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
984static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
985static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
986 u8 *continuous);
987static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
988 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
989static void read_vc_remote_link_width(struct hfi1_devdata *dd,
990 u8 *remote_tx_rate, u16 *link_widths);
991static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
992 u8 *flag_bits, u16 *link_widths);
993static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
994 u8 *device_rev);
995static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
996static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
997static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
998 u8 *tx_polarity_inversion,
999 u8 *rx_polarity_inversion, u8 *max_rate);
1000static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1001 unsigned int context, u64 err_status);
1002static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1003static void handle_dcc_err(struct hfi1_devdata *dd,
1004 unsigned int context, u64 err_status);
1005static void handle_lcb_err(struct hfi1_devdata *dd,
1006 unsigned int context, u64 err_status);
1007static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1008static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1009static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1010static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1011static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1012static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void set_partition_keys(struct hfi1_pportdata *);
1016static const char *link_state_name(u32 state);
1017static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1018 u32 state);
1019static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1020 u64 *out_data);
1021static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1022static int thermal_init(struct hfi1_devdata *dd);
1023
1024static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1025 int msecs);
1026static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1027static void handle_temp_err(struct hfi1_devdata *);
1028static void dc_shutdown(struct hfi1_devdata *);
1029static void dc_start(struct hfi1_devdata *);
1030
1031/*
1032 * Error interrupt table entry. This is used as input to the interrupt
 1033 * "clear down" routine used for all second tier error interrupt registers.
1034 * Second tier interrupt registers have a single bit representing them
1035 * in the top-level CceIntStatus.
1036 */
1037struct err_reg_info {
1038 u32 status; /* status CSR offset */
1039 u32 clear; /* clear CSR offset */
1040 u32 mask; /* mask CSR offset */
1041 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1042 const char *desc;
1043};
1044
1045#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1046#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1047#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1048
1049/*
1050 * Helpers for building HFI and DC error interrupt table entries. Different
1051 * helpers are needed because of inconsistent register names.
1052 */
1053#define EE(reg, handler, desc) \
1054 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1055 handler, desc }
1056#define DC_EE1(reg, handler, desc) \
1057 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1058#define DC_EE2(reg, handler, desc) \
1059 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
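/*
 * For illustration: EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 * The DC_EE* variants exist only because the DC blocks name their
 * status, clear and enable CSRs *_FLG, *_FLG_CLR or *_CLR, and *_FLG_EN
 * or *_EN respectively.
 */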
1060
1061/*
1062 * Table of the "misc" grouping of error interrupts. Each entry refers to
1063 * another register containing more information.
1064 */
1065static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1066/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1067/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1068/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1069/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1070/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1071/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1072/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1073/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1074 /* the rest are reserved */
1075};
1076
1077/*
1078 * Index into the Various section of the interrupt sources
1079 * corresponding to the Critical Temperature interrupt.
1080 */
1081#define TCRIT_INT_SOURCE 4
1082
1083/*
1084 * SDMA error interrupt entry - refers to another register containing more
1085 * information.
1086 */
1087static const struct err_reg_info sdma_eng_err =
1088 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1089
1090static const struct err_reg_info various_err[NUM_VARIOUS] = {
1091/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1092/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1093/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1094/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1095/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1096 /* rest are reserved */
1097};
1098
1099/*
1100 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1101 * register can not be derived from the MTU value because 10K is not
1102 * a power of 2. Therefore, we need a constant. Everything else can
1103 * be calculated.
1104 */
1105#define DCC_CFG_PORT_MTU_CAP_10240 7
1106
1107/*
1108 * Table of the DC grouping of error interrupts. Each entry refers to
1109 * another register containing more information.
1110 */
1111static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1112/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1113/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1114/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1115/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1116 /* the rest are reserved */
1117};
1118
1119struct cntr_entry {
1120 /*
1121 * counter name
1122 */
1123 char *name;
1124
1125 /*
1126 * csr to read for name (if applicable)
1127 */
1128 u64 csr;
1129
1130 /*
1131 * offset into dd or ppd to store the counter's value
1132 */
1133 int offset;
1134
1135 /*
1136 * flags
1137 */
1138 u8 flags;
1139
1140 /*
1141 * accessor for stat element, context either dd or ppd
1142 */
1143 u64 (*rw_cntr)(const struct cntr_entry *,
1144 void *context,
1145 int vl,
1146 int mode,
1147 u64 data);
1148};
1149
1150#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1151#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1152
1153#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1154{ \
1155 name, \
1156 csr, \
1157 offset, \
1158 flags, \
1159 accessor \
1160}
1161
1162/* 32bit RXE */
1163#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1164CNTR_ELEM(#name, \
1165 (counter * 8 + RCV_COUNTER_ARRAY32), \
1166 0, flags | CNTR_32BIT, \
1167 port_access_u32_csr)
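/*
 * For illustration: RXE32_PORT_CNTR_ELEM(Foo, n, flags), Foo being any
 * counter name, yields a cntr_entry named "Foo" whose csr is
 * RCV_COUNTER_ARRAY32 + 8 * n (the array entries are 8 bytes apart),
 * with CNTR_32BIT added to the flags and port_access_u32_csr() as the
 * accessor.  The other *_CNTR_ELEM helpers differ only in the base array,
 * the width flag and the accessor.
 */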
1168
1169#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1170CNTR_ELEM(#name, \
1171 (counter * 8 + RCV_COUNTER_ARRAY32), \
1172 0, flags | CNTR_32BIT, \
1173 dev_access_u32_csr)
1174
1175/* 64bit RXE */
1176#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1177CNTR_ELEM(#name, \
1178 (counter * 8 + RCV_COUNTER_ARRAY64), \
1179 0, flags, \
1180 port_access_u64_csr)
1181
1182#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1183CNTR_ELEM(#name, \
1184 (counter * 8 + RCV_COUNTER_ARRAY64), \
1185 0, flags, \
1186 dev_access_u64_csr)
1187
1188#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1189#define OVR_ELM(ctx) \
1190CNTR_ELEM("RcvHdrOvr" #ctx, \
1191 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1192 0, CNTR_NORMAL, port_access_u64_csr)
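/*
 * For illustration: OVR_ELM(3) defines the counter "RcvHdrOvr3" reading
 * RCV_HDR_OVFL_CNT + 0x300, the per-context register blocks being 0x100
 * bytes apart.
 */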
1193
1194/* 32bit TXE */
1195#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1196CNTR_ELEM(#name, \
1197 (counter * 8 + SEND_COUNTER_ARRAY32), \
1198 0, flags | CNTR_32BIT, \
1199 port_access_u32_csr)
1200
1201/* 64bit TXE */
1202#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1203CNTR_ELEM(#name, \
1204 (counter * 8 + SEND_COUNTER_ARRAY64), \
1205 0, flags, \
1206 port_access_u64_csr)
1207
1208# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1209CNTR_ELEM(#name,\
1210 counter * 8 + SEND_COUNTER_ARRAY64, \
1211 0, \
1212 flags, \
1213 dev_access_u64_csr)
1214
1215/* CCE */
1216#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1217CNTR_ELEM(#name, \
1218 (counter * 8 + CCE_COUNTER_ARRAY32), \
1219 0, flags | CNTR_32BIT, \
1220 dev_access_u32_csr)
1221
1222#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1223CNTR_ELEM(#name, \
1224 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1225 0, flags | CNTR_32BIT, \
1226 dev_access_u32_csr)
1227
1228/* DC */
1229#define DC_PERF_CNTR(name, counter, flags) \
1230CNTR_ELEM(#name, \
1231 counter, \
1232 0, \
1233 flags, \
1234 dev_access_u64_csr)
1235
1236#define DC_PERF_CNTR_LCB(name, counter, flags) \
1237CNTR_ELEM(#name, \
1238 counter, \
1239 0, \
1240 flags, \
1241 dc_access_lcb_cntr)
1242
1243/* ibp counters */
1244#define SW_IBP_CNTR(name, cntr) \
1245CNTR_ELEM(#name, \
1246 0, \
1247 0, \
1248 CNTR_SYNTH, \
1249 access_ibp_##cntr)
1250
1251u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1252{
1253 u64 val;
1254
1255 if (dd->flags & HFI1_PRESENT) {
1256 val = readq((void __iomem *)dd->kregbase + offset);
1257 return val;
1258 }
1259 return -1;
1260}
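/*
 * Note: the -1 returned when HFI1_PRESENT is clear is the all-ones u64
 * pattern, much like what a read of absent PCIe hardware would return.
 */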
1261
1262void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1263{
1264 if (dd->flags & HFI1_PRESENT)
1265 writeq(value, (void __iomem *)dd->kregbase + offset);
1266}
1267
1268void __iomem *get_csr_addr(
1269 struct hfi1_devdata *dd,
1270 u32 offset)
1271{
1272 return (void __iomem *)dd->kregbase + offset;
1273}
1274
1275static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1276 int mode, u64 value)
1277{
1278 u64 ret;
1279
1280
1281 if (mode == CNTR_MODE_R) {
1282 ret = read_csr(dd, csr);
1283 } else if (mode == CNTR_MODE_W) {
1284 write_csr(dd, csr, value);
1285 ret = value;
1286 } else {
1287 dd_dev_err(dd, "Invalid cntr register access mode");
1288 return 0;
1289 }
1290
1291 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1292 return ret;
1293}
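/*
 * For illustration: the CSR-backed counter accessors below all funnel into
 * read_write_csr().  CNTR_MODE_R returns the current CSR value,
 * CNTR_MODE_W writes 'value' and echoes it back, and any other mode is
 * rejected with a return of 0.
 */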
1294
1295/* Dev Access */
1296static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1297 void *context, int vl, int mode, u64 data)
1298{
 1299	struct hfi1_devdata *dd = context;
 1300	u64 csr = entry->csr;
 1301
 1302	if (entry->flags & CNTR_SDMA) {
1303 if (vl == CNTR_INVALID_VL)
1304 return 0;
1305 csr += 0x100 * vl;
1306 } else {
1307 if (vl != CNTR_INVALID_VL)
1308 return 0;
1309 }
1310 return read_write_csr(dd, csr, mode, data);
1311}
1312
1313static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1314 void *context, int idx, int mode, u64 data)
1315{
1316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1317
1318 if (dd->per_sdma && idx < dd->num_sdma)
1319 return dd->per_sdma[idx].err_cnt;
1320 return 0;
1321}
1322
1323static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1324 void *context, int idx, int mode, u64 data)
1325{
1326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1327
1328 if (dd->per_sdma && idx < dd->num_sdma)
1329 return dd->per_sdma[idx].sdma_int_cnt;
1330 return 0;
1331}
1332
1333static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1334 void *context, int idx, int mode, u64 data)
1335{
1336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1337
1338 if (dd->per_sdma && idx < dd->num_sdma)
1339 return dd->per_sdma[idx].idle_int_cnt;
1340 return 0;
1341}
1342
1343static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1344 void *context, int idx, int mode,
1345 u64 data)
1346{
1347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348
1349 if (dd->per_sdma && idx < dd->num_sdma)
1350 return dd->per_sdma[idx].progress_int_cnt;
1351 return 0;
1352}
1353
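/*
 * 64-bit device counter access.  CNTR_VL counters are laid out as an
 * array of 64-bit CSRs, one per VL and 8 bytes apart, so the VL index
 * simply scales the offset; non-VL counters accept only CNTR_INVALID_VL.
 */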
1354static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1355 int vl, int mode, u64 data)
1356{
1357	struct hfi1_devdata *dd = context;
1358
1359 u64 val = 0;
1360 u64 csr = entry->csr;
1361
1362 if (entry->flags & CNTR_VL) {
1363 if (vl == CNTR_INVALID_VL)
1364 return 0;
1365 csr += 8 * vl;
1366 } else {
1367 if (vl != CNTR_INVALID_VL)
1368 return 0;
1369 }
1370
1371 val = read_write_csr(dd, csr, mode, data);
1372 return val;
1373}
1374
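/*
 * LCB counters sit behind the DC and are reached through
 * read_lcb_csr()/write_lcb_csr(), which can fail when the LCB is not
 * currently accessible; failures are logged and the counter reads as 0.
 */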
1375static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1376 int vl, int mode, u64 data)
1377{
1378	struct hfi1_devdata *dd = context;
1379	u32 csr = entry->csr;
1380 int ret = 0;
1381
1382 if (vl != CNTR_INVALID_VL)
1383 return 0;
1384 if (mode == CNTR_MODE_R)
1385 ret = read_lcb_csr(dd, csr, &data);
1386 else if (mode == CNTR_MODE_W)
1387 ret = write_lcb_csr(dd, csr, data);
1388
1389 if (ret) {
1390 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1391 return 0;
1392 }
1393
1394 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1395 return data;
1396}
1397
1398/* Port Access */
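/*
 * Port-scoped counter access: the context pointer is the per-port
 * hfi1_pportdata, and CNTR_VL counters index an 8-byte-strided array just
 * as the device-scoped variants do.
 */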
1399static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1400 int vl, int mode, u64 data)
1401{
1402	struct hfi1_pportdata *ppd = context;
1403
1404 if (vl != CNTR_INVALID_VL)
1405 return 0;
1406 return read_write_csr(ppd->dd, entry->csr, mode, data);
1407}
1408
1409static u64 port_access_u64_csr(const struct cntr_entry *entry,
1410 void *context, int vl, int mode, u64 data)
1411{
1412	struct hfi1_pportdata *ppd = context;
1413	u64 val;
1414 u64 csr = entry->csr;
1415
1416 if (entry->flags & CNTR_VL) {
1417 if (vl == CNTR_INVALID_VL)
1418 return 0;
1419 csr += 8 * vl;
1420 } else {
1421 if (vl != CNTR_INVALID_VL)
1422 return 0;
1423 }
1424 val = read_write_csr(ppd->dd, csr, mode, data);
1425 return val;
1426}
1427
1428/* Software defined */
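/*
 * Software counters are plain u64s in host memory: a read returns the
 * current value and a write stores the supplied value (typically used to
 * zero them).
 */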
1429static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1430 u64 data)
1431{
1432 u64 ret;
1433
1434 if (mode == CNTR_MODE_R) {
1435 ret = *cntr;
1436 } else if (mode == CNTR_MODE_W) {
1437 *cntr = data;
1438 ret = data;
1439 } else {
1440 dd_dev_err(dd, "Invalid cntr sw access mode");
1441 return 0;
1442 }
1443
1444 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1445
1446 return ret;
1447}
1448
1449static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1450 int vl, int mode, u64 data)
1451{
1452	struct hfi1_pportdata *ppd = context;
1453
1454 if (vl != CNTR_INVALID_VL)
1455 return 0;
1456 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1457}
1458
1459static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1460 int vl, int mode, u64 data)
1461{
1462	struct hfi1_pportdata *ppd = context;
1463
1464 if (vl != CNTR_INVALID_VL)
1465 return 0;
1466 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1467}
1468
1469static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1470 void *context, int vl, int mode,
1471 u64 data)
1472{
1473 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1474
1475 if (vl != CNTR_INVALID_VL)
1476 return 0;
1477 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1478}
1479
1480static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1481 void *context, int vl, int mode, u64 data)
1482{
1483	struct hfi1_pportdata *ppd = context;
1484
1485 if (vl != CNTR_INVALID_VL)
1486 return 0;
1487
1488 return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1489}
1490
1491static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1492 void *context, int vl, int mode, u64 data)
1493{
1494	struct hfi1_pportdata *ppd = context;
1495
1496 if (vl != CNTR_INVALID_VL)
1497 return 0;
1498
1499 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1500 mode, data);
1501}
1502
1503static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1504 void *context, int vl, int mode, u64 data)
1505{
1506	struct hfi1_pportdata *ppd = context;
1507
1508 if (vl != CNTR_INVALID_VL)
1509 return 0;
1510
1511 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1512 mode, data);
1513}
1514
1515u64 get_all_cpu_total(u64 __percpu *cntr)
1516{
1517 int cpu;
1518 u64 counter = 0;
1519
1520 for_each_possible_cpu(cpu)
1521 counter += *per_cpu_ptr(cntr, cpu);
1522 return counter;
1523}
1524
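/*
 * Per-CPU counters are never cleared in place.  A read sums the value
 * across all possible CPUs and subtracts the recorded baseline *z_val;
 * a write of zero simply captures a new baseline, so no CPU's copy ever
 * has to be touched.  Any non-zero write is rejected.
 */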
1525static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1526 u64 __percpu *cntr,
1527 int vl, int mode, u64 data)
1528{
1529
1530 u64 ret = 0;
1531
1532 if (vl != CNTR_INVALID_VL)
1533 return 0;
1534
1535 if (mode == CNTR_MODE_R) {
1536 ret = get_all_cpu_total(cntr) - *z_val;
1537 } else if (mode == CNTR_MODE_W) {
1538 /* A write can only zero the counter */
1539 if (data == 0)
1540 *z_val = get_all_cpu_total(cntr);
1541 else
1542 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1543 } else {
1544 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1545 return 0;
1546 }
1547
1548 return ret;
1549}
1550
1551static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1552 void *context, int vl, int mode, u64 data)
1553{
1554	struct hfi1_devdata *dd = context;
1555
1556 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1557 mode, data);
1558}
1559
1560static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1561 void *context, int vl, int mode, u64 data)
1562{
1563	struct hfi1_devdata *dd = context;
1564
1565 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1566 mode, data);
1567}
1568
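/*
 * The verbs wait counters below are read-only snapshots: the mode and
 * data arguments are ignored, so they cannot be zeroed through this
 * interface.
 */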
1569static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1570 void *context, int vl, int mode, u64 data)
1571{
1572	struct hfi1_devdata *dd = context;
1573
1574 return dd->verbs_dev.n_piowait;
1575}
1576
1577static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1578 void *context, int vl, int mode, u64 data)
1579{
1580	struct hfi1_devdata *dd = context;
1581
1582 return dd->verbs_dev.n_txwait;
1583}
1584
1585static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1586 void *context, int vl, int mode, u64 data)
1587{
1588	struct hfi1_devdata *dd = context;
1589
1590 return dd->verbs_dev.n_kmem_wait;
1591}
1592
1593static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1594 void *context, int vl, int mode, u64 data)
1595{
1596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1597
1598 return dd->verbs_dev.n_send_schedule;
1599}
1600
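/*
 * The accessors below expose one software counter per error-status bit
 * (MISC, CCE, RcvErr, SendPio, SendDma, SendEgress).  Each helper simply
 * returns the element of the corresponding *_err_status_cnt[] array whose
 * index matches the bit position; the arrays themselves are presumably
 * incremented by the error-interrupt handling code elsewhere in this
 * file.
 */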
1601/* Software counters for the error status bits within MISC_ERR_STATUS */
1602static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1603 void *context, int vl, int mode,
1604 u64 data)
1605{
1606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1607
1608 return dd->misc_err_status_cnt[12];
1609}
1610
1611static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1612 void *context, int vl, int mode,
1613 u64 data)
1614{
1615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1616
1617 return dd->misc_err_status_cnt[11];
1618}
1619
1620static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1621 void *context, int vl, int mode,
1622 u64 data)
1623{
1624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1625
1626 return dd->misc_err_status_cnt[10];
1627}
1628
1629static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1630 void *context, int vl,
1631 int mode, u64 data)
1632{
1633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1634
1635 return dd->misc_err_status_cnt[9];
1636}
1637
1638static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1639 void *context, int vl, int mode,
1640 u64 data)
1641{
1642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1643
1644 return dd->misc_err_status_cnt[8];
1645}
1646
1647static u64 access_misc_efuse_read_bad_addr_err_cnt(
1648 const struct cntr_entry *entry,
1649 void *context, int vl, int mode, u64 data)
1650{
1651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1652
1653 return dd->misc_err_status_cnt[7];
1654}
1655
1656static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1657 void *context, int vl,
1658 int mode, u64 data)
1659{
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->misc_err_status_cnt[6];
1663}
1664
1665static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1666 void *context, int vl, int mode,
1667 u64 data)
1668{
1669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1670
1671 return dd->misc_err_status_cnt[5];
1672}
1673
1674static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1675 void *context, int vl, int mode,
1676 u64 data)
1677{
1678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1679
1680 return dd->misc_err_status_cnt[4];
1681}
1682
1683static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1684 void *context, int vl,
1685 int mode, u64 data)
1686{
1687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1688
1689 return dd->misc_err_status_cnt[3];
1690}
1691
1692static u64 access_misc_csr_write_bad_addr_err_cnt(
1693 const struct cntr_entry *entry,
1694 void *context, int vl, int mode, u64 data)
1695{
1696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1697
1698 return dd->misc_err_status_cnt[2];
1699}
1700
1701static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1702 void *context, int vl,
1703 int mode, u64 data)
1704{
1705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706
1707 return dd->misc_err_status_cnt[1];
1708}
1709
1710static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1711 void *context, int vl, int mode,
1712 u64 data)
1713{
1714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1715
1716 return dd->misc_err_status_cnt[0];
1717}
1718
1719/*
1720 * Software counter for the aggregate of
1721 * individual CceErrStatus counters
1722 */
1723static u64 access_sw_cce_err_status_aggregated_cnt(
1724 const struct cntr_entry *entry,
1725 void *context, int vl, int mode, u64 data)
1726{
1727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729 return dd->sw_cce_err_status_aggregate;
1730}
1731
1732/*
1733 * Software counters corresponding to each of the
1734 * error status bits within CceErrStatus
1735 */
1736static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1737 void *context, int vl, int mode,
1738 u64 data)
1739{
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742 return dd->cce_err_status_cnt[40];
1743}
1744
1745static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1746 void *context, int vl, int mode,
1747 u64 data)
1748{
1749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1750
1751 return dd->cce_err_status_cnt[39];
1752}
1753
1754static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1755 void *context, int vl, int mode,
1756 u64 data)
1757{
1758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1759
1760 return dd->cce_err_status_cnt[38];
1761}
1762
1763static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1764 void *context, int vl, int mode,
1765 u64 data)
1766{
1767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1768
1769 return dd->cce_err_status_cnt[37];
1770}
1771
1772static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1773 void *context, int vl, int mode,
1774 u64 data)
1775{
1776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1777
1778 return dd->cce_err_status_cnt[36];
1779}
1780
1781static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1782 const struct cntr_entry *entry,
1783 void *context, int vl, int mode, u64 data)
1784{
1785 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1786
1787 return dd->cce_err_status_cnt[35];
1788}
1789
1790static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1791 const struct cntr_entry *entry,
1792 void *context, int vl, int mode, u64 data)
1793{
1794 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1795
1796 return dd->cce_err_status_cnt[34];
1797}
1798
1799static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1800 void *context, int vl,
1801 int mode, u64 data)
1802{
1803 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1804
1805 return dd->cce_err_status_cnt[33];
1806}
1807
1808static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1809 void *context, int vl, int mode,
1810 u64 data)
1811{
1812 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1813
1814 return dd->cce_err_status_cnt[32];
1815}
1816
1817static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1818 void *context, int vl, int mode, u64 data)
1819{
1820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821
1822 return dd->cce_err_status_cnt[31];
1823}
1824
1825static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1826 void *context, int vl, int mode,
1827 u64 data)
1828{
1829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1830
1831 return dd->cce_err_status_cnt[30];
1832}
1833
1834static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1835 void *context, int vl, int mode,
1836 u64 data)
1837{
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[29];
1841}
1842
1843static u64 access_pcic_transmit_back_parity_err_cnt(
1844 const struct cntr_entry *entry,
1845 void *context, int vl, int mode, u64 data)
1846{
1847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1848
1849 return dd->cce_err_status_cnt[28];
1850}
1851
1852static u64 access_pcic_transmit_front_parity_err_cnt(
1853 const struct cntr_entry *entry,
1854 void *context, int vl, int mode, u64 data)
1855{
1856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1857
1858 return dd->cce_err_status_cnt[27];
1859}
1860
1861static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1862 void *context, int vl, int mode,
1863 u64 data)
1864{
1865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1866
1867 return dd->cce_err_status_cnt[26];
1868}
1869
1870static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1871 void *context, int vl, int mode,
1872 u64 data)
1873{
1874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1875
1876 return dd->cce_err_status_cnt[25];
1877}
1878
1879static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1880 void *context, int vl, int mode,
1881 u64 data)
1882{
1883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1884
1885 return dd->cce_err_status_cnt[24];
1886}
1887
1888static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1889 void *context, int vl, int mode,
1890 u64 data)
1891{
1892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1893
1894 return dd->cce_err_status_cnt[23];
1895}
1896
1897static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1898 void *context, int vl,
1899 int mode, u64 data)
1900{
1901 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1902
1903 return dd->cce_err_status_cnt[22];
1904}
1905
1906static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1907 void *context, int vl, int mode,
1908 u64 data)
1909{
1910 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1911
1912 return dd->cce_err_status_cnt[21];
1913}
1914
1915static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1916 const struct cntr_entry *entry,
1917 void *context, int vl, int mode, u64 data)
1918{
1919 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1920
1921 return dd->cce_err_status_cnt[20];
1922}
1923
1924static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1925 void *context, int vl,
1926 int mode, u64 data)
1927{
1928 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1929
1930 return dd->cce_err_status_cnt[19];
1931}
1932
1933static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1934 void *context, int vl, int mode,
1935 u64 data)
1936{
1937 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1938
1939 return dd->cce_err_status_cnt[18];
1940}
1941
1942static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1943 void *context, int vl, int mode,
1944 u64 data)
1945{
1946 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1947
1948 return dd->cce_err_status_cnt[17];
1949}
1950
1951static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1952 void *context, int vl, int mode,
1953 u64 data)
1954{
1955 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1956
1957 return dd->cce_err_status_cnt[16];
1958}
1959
1960static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1961 void *context, int vl, int mode,
1962 u64 data)
1963{
1964 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1965
1966 return dd->cce_err_status_cnt[15];
1967}
1968
1969static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1970 void *context, int vl,
1971 int mode, u64 data)
1972{
1973 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1974
1975 return dd->cce_err_status_cnt[14];
1976}
1977
1978static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1979 void *context, int vl, int mode,
1980 u64 data)
1981{
1982 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1983
1984 return dd->cce_err_status_cnt[13];
1985}
1986
1987static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1988 const struct cntr_entry *entry,
1989 void *context, int vl, int mode, u64 data)
1990{
1991 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1992
1993 return dd->cce_err_status_cnt[12];
1994}
1995
1996static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1997 const struct cntr_entry *entry,
1998 void *context, int vl, int mode, u64 data)
1999{
2000 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2001
2002 return dd->cce_err_status_cnt[11];
2003}
2004
2005static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2006 const struct cntr_entry *entry,
2007 void *context, int vl, int mode, u64 data)
2008{
2009 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2010
2011 return dd->cce_err_status_cnt[10];
2012}
2013
2014static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2015 const struct cntr_entry *entry,
2016 void *context, int vl, int mode, u64 data)
2017{
2018 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2019
2020 return dd->cce_err_status_cnt[9];
2021}
2022
2023static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2024 const struct cntr_entry *entry,
2025 void *context, int vl, int mode, u64 data)
2026{
2027 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2028
2029 return dd->cce_err_status_cnt[8];
2030}
2031
2032static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2033 void *context, int vl,
2034 int mode, u64 data)
2035{
2036 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2037
2038 return dd->cce_err_status_cnt[7];
2039}
2040
2041static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2042 const struct cntr_entry *entry,
2043 void *context, int vl, int mode, u64 data)
2044{
2045 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2046
2047 return dd->cce_err_status_cnt[6];
2048}
2049
2050static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2051 void *context, int vl, int mode,
2052 u64 data)
2053{
2054 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2055
2056 return dd->cce_err_status_cnt[5];
2057}
2058
2059static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2060 void *context, int vl, int mode,
2061 u64 data)
2062{
2063 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2064
2065 return dd->cce_err_status_cnt[4];
2066}
2067
2068static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2069 const struct cntr_entry *entry,
2070 void *context, int vl, int mode, u64 data)
2071{
2072 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2073
2074 return dd->cce_err_status_cnt[3];
2075}
2076
2077static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2078 void *context, int vl,
2079 int mode, u64 data)
2080{
2081 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2082
2083 return dd->cce_err_status_cnt[2];
2084}
2085
2086static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2087 void *context, int vl,
2088 int mode, u64 data)
2089{
2090 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2091
2092 return dd->cce_err_status_cnt[1];
2093}
2094
2095static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2096 void *context, int vl, int mode,
2097 u64 data)
2098{
2099 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2100
2101 return dd->cce_err_status_cnt[0];
2102}
2103
2104/*
2105 * Software counters corresponding to each of the
2106 * error status bits within RcvErrStatus
2107 */
2108static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2109 void *context, int vl, int mode,
2110 u64 data)
2111{
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114 return dd->rcv_err_status_cnt[63];
2115}
2116
2117static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2118 void *context, int vl,
2119 int mode, u64 data)
2120{
2121 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123 return dd->rcv_err_status_cnt[62];
2124}
2125
2126static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2127 void *context, int vl, int mode,
2128 u64 data)
2129{
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132 return dd->rcv_err_status_cnt[61];
2133}
2134
2135static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2136 void *context, int vl, int mode,
2137 u64 data)
2138{
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141 return dd->rcv_err_status_cnt[60];
2142}
2143
2144static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl,
2146 int mode, u64 data)
2147{
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150 return dd->rcv_err_status_cnt[59];
2151}
2152
2153static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2154 void *context, int vl,
2155 int mode, u64 data)
2156{
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159 return dd->rcv_err_status_cnt[58];
2160}
2161
2162static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl, int mode,
2164 u64 data)
2165{
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168 return dd->rcv_err_status_cnt[57];
2169}
2170
2171static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl, int mode,
2173 u64 data)
2174{
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177 return dd->rcv_err_status_cnt[56];
2178}
2179
2180static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2181 void *context, int vl, int mode,
2182 u64 data)
2183{
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186 return dd->rcv_err_status_cnt[55];
2187}
2188
2189static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2190 const struct cntr_entry *entry,
2191 void *context, int vl, int mode, u64 data)
2192{
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195 return dd->rcv_err_status_cnt[54];
2196}
2197
2198static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2199 const struct cntr_entry *entry,
2200 void *context, int vl, int mode, u64 data)
2201{
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204 return dd->rcv_err_status_cnt[53];
2205}
2206
2207static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2208 void *context, int vl,
2209 int mode, u64 data)
2210{
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213 return dd->rcv_err_status_cnt[52];
2214}
2215
2216static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2217 void *context, int vl,
2218 int mode, u64 data)
2219{
2220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2221
2222 return dd->rcv_err_status_cnt[51];
2223}
2224
2225static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2226 void *context, int vl,
2227 int mode, u64 data)
2228{
2229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2230
2231 return dd->rcv_err_status_cnt[50];
2232}
2233
2234static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2235 void *context, int vl,
2236 int mode, u64 data)
2237{
2238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2239
2240 return dd->rcv_err_status_cnt[49];
2241}
2242
2243static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2244 void *context, int vl,
2245 int mode, u64 data)
2246{
2247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2248
2249 return dd->rcv_err_status_cnt[48];
2250}
2251
2252static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2253 void *context, int vl,
2254 int mode, u64 data)
2255{
2256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2257
2258 return dd->rcv_err_status_cnt[47];
2259}
2260
2261static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2262 void *context, int vl, int mode,
2263 u64 data)
2264{
2265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2266
2267 return dd->rcv_err_status_cnt[46];
2268}
2269
2270static u64 access_rx_hq_intr_csr_parity_err_cnt(
2271 const struct cntr_entry *entry,
2272 void *context, int vl, int mode, u64 data)
2273{
2274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2275
2276 return dd->rcv_err_status_cnt[45];
2277}
2278
2279static u64 access_rx_lookup_csr_parity_err_cnt(
2280 const struct cntr_entry *entry,
2281 void *context, int vl, int mode, u64 data)
2282{
2283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2284
2285 return dd->rcv_err_status_cnt[44];
2286}
2287
2288static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2289 const struct cntr_entry *entry,
2290 void *context, int vl, int mode, u64 data)
2291{
2292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2293
2294 return dd->rcv_err_status_cnt[43];
2295}
2296
2297static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2298 const struct cntr_entry *entry,
2299 void *context, int vl, int mode, u64 data)
2300{
2301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2302
2303 return dd->rcv_err_status_cnt[42];
2304}
2305
2306static u64 access_rx_lookup_des_part2_parity_err_cnt(
2307 const struct cntr_entry *entry,
2308 void *context, int vl, int mode, u64 data)
2309{
2310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2311
2312 return dd->rcv_err_status_cnt[41];
2313}
2314
2315static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2316 const struct cntr_entry *entry,
2317 void *context, int vl, int mode, u64 data)
2318{
2319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2320
2321 return dd->rcv_err_status_cnt[40];
2322}
2323
2324static u64 access_rx_lookup_des_part1_unc_err_cnt(
2325 const struct cntr_entry *entry,
2326 void *context, int vl, int mode, u64 data)
2327{
2328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2329
2330 return dd->rcv_err_status_cnt[39];
2331}
2332
2333static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2334 const struct cntr_entry *entry,
2335 void *context, int vl, int mode, u64 data)
2336{
2337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2338
2339 return dd->rcv_err_status_cnt[38];
2340}
2341
2342static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2343 const struct cntr_entry *entry,
2344 void *context, int vl, int mode, u64 data)
2345{
2346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2347
2348 return dd->rcv_err_status_cnt[37];
2349}
2350
2351static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2352 const struct cntr_entry *entry,
2353 void *context, int vl, int mode, u64 data)
2354{
2355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2356
2357 return dd->rcv_err_status_cnt[36];
2358}
2359
2360static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2361 const struct cntr_entry *entry,
2362 void *context, int vl, int mode, u64 data)
2363{
2364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2365
2366 return dd->rcv_err_status_cnt[35];
2367}
2368
2369static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2370 const struct cntr_entry *entry,
2371 void *context, int vl, int mode, u64 data)
2372{
2373 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2374
2375 return dd->rcv_err_status_cnt[34];
2376}
2377
2378static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2379 const struct cntr_entry *entry,
2380 void *context, int vl, int mode, u64 data)
2381{
2382 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2383
2384 return dd->rcv_err_status_cnt[33];
2385}
2386
2387static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2388 void *context, int vl, int mode,
2389 u64 data)
2390{
2391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2392
2393 return dd->rcv_err_status_cnt[32];
2394}
2395
2396static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2397 void *context, int vl, int mode,
2398 u64 data)
2399{
2400 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2401
2402 return dd->rcv_err_status_cnt[31];
2403}
2404
2405static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2406 void *context, int vl, int mode,
2407 u64 data)
2408{
2409 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2410
2411 return dd->rcv_err_status_cnt[30];
2412}
2413
2414static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2415 void *context, int vl, int mode,
2416 u64 data)
2417{
2418 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2419
2420 return dd->rcv_err_status_cnt[29];
2421}
2422
2423static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2424 void *context, int vl,
2425 int mode, u64 data)
2426{
2427 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2428
2429 return dd->rcv_err_status_cnt[28];
2430}
2431
2432static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2433 const struct cntr_entry *entry,
2434 void *context, int vl, int mode, u64 data)
2435{
2436 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2437
2438 return dd->rcv_err_status_cnt[27];
2439}
2440
2441static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2442 const struct cntr_entry *entry,
2443 void *context, int vl, int mode, u64 data)
2444{
2445 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2446
2447 return dd->rcv_err_status_cnt[26];
2448}
2449
2450static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2451 const struct cntr_entry *entry,
2452 void *context, int vl, int mode, u64 data)
2453{
2454 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2455
2456 return dd->rcv_err_status_cnt[25];
2457}
2458
2459static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2460 const struct cntr_entry *entry,
2461 void *context, int vl, int mode, u64 data)
2462{
2463 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2464
2465 return dd->rcv_err_status_cnt[24];
2466}
2467
2468static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2469 const struct cntr_entry *entry,
2470 void *context, int vl, int mode, u64 data)
2471{
2472 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2473
2474 return dd->rcv_err_status_cnt[23];
2475}
2476
2477static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2478 const struct cntr_entry *entry,
2479 void *context, int vl, int mode, u64 data)
2480{
2481 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2482
2483 return dd->rcv_err_status_cnt[22];
2484}
2485
2486static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2487 const struct cntr_entry *entry,
2488 void *context, int vl, int mode, u64 data)
2489{
2490 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2491
2492 return dd->rcv_err_status_cnt[21];
2493}
2494
2495static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2496 const struct cntr_entry *entry,
2497 void *context, int vl, int mode, u64 data)
2498{
2499 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2500
2501 return dd->rcv_err_status_cnt[20];
2502}
2503
2504static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2505 const struct cntr_entry *entry,
2506 void *context, int vl, int mode, u64 data)
2507{
2508 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2509
2510 return dd->rcv_err_status_cnt[19];
2511}
2512
2513static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2514 void *context, int vl,
2515 int mode, u64 data)
2516{
2517 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2518
2519 return dd->rcv_err_status_cnt[18];
2520}
2521
2522static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2523 void *context, int vl,
2524 int mode, u64 data)
2525{
2526 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2527
2528 return dd->rcv_err_status_cnt[17];
2529}
2530
2531static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2532 const struct cntr_entry *entry,
2533 void *context, int vl, int mode, u64 data)
2534{
2535 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2536
2537 return dd->rcv_err_status_cnt[16];
2538}
2539
2540static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2541 const struct cntr_entry *entry,
2542 void *context, int vl, int mode, u64 data)
2543{
2544 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2545
2546 return dd->rcv_err_status_cnt[15];
2547}
2548
2549static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2550 void *context, int vl,
2551 int mode, u64 data)
2552{
2553 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2554
2555 return dd->rcv_err_status_cnt[14];
2556}
2557
2558static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2559 void *context, int vl,
2560 int mode, u64 data)
2561{
2562 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2563
2564 return dd->rcv_err_status_cnt[13];
2565}
2566
2567static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2568 void *context, int vl, int mode,
2569 u64 data)
2570{
2571 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2572
2573 return dd->rcv_err_status_cnt[12];
2574}
2575
2576static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2577 void *context, int vl, int mode,
2578 u64 data)
2579{
2580 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2581
2582 return dd->rcv_err_status_cnt[11];
2583}
2584
2585static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2586 void *context, int vl, int mode,
2587 u64 data)
2588{
2589 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2590
2591 return dd->rcv_err_status_cnt[10];
2592}
2593
2594static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2595 void *context, int vl, int mode,
2596 u64 data)
2597{
2598 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2599
2600 return dd->rcv_err_status_cnt[9];
2601}
2602
2603static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2604 void *context, int vl, int mode,
2605 u64 data)
2606{
2607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2608
2609 return dd->rcv_err_status_cnt[8];
2610}
2611
2612static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2613 const struct cntr_entry *entry,
2614 void *context, int vl, int mode, u64 data)
2615{
2616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2617
2618 return dd->rcv_err_status_cnt[7];
2619}
2620
2621static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2622 const struct cntr_entry *entry,
2623 void *context, int vl, int mode, u64 data)
2624{
2625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2626
2627 return dd->rcv_err_status_cnt[6];
2628}
2629
2630static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2631 void *context, int vl, int mode,
2632 u64 data)
2633{
2634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2635
2636 return dd->rcv_err_status_cnt[5];
2637}
2638
2639static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2640 void *context, int vl, int mode,
2641 u64 data)
2642{
2643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2644
2645 return dd->rcv_err_status_cnt[4];
2646}
2647
2648static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2649 void *context, int vl, int mode,
2650 u64 data)
2651{
2652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2653
2654 return dd->rcv_err_status_cnt[3];
2655}
2656
2657static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2658 void *context, int vl, int mode,
2659 u64 data)
2660{
2661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2662
2663 return dd->rcv_err_status_cnt[2];
2664}
2665
2666static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2667 void *context, int vl, int mode,
2668 u64 data)
2669{
2670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2671
2672 return dd->rcv_err_status_cnt[1];
2673}
2674
2675static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2676 void *context, int vl, int mode,
2677 u64 data)
2678{
2679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2680
2681 return dd->rcv_err_status_cnt[0];
2682}
2683
2684/*
2685 * Software counters corresponding to each of the
2686 * error status bits within SendPioErrStatus
2687 */
2688static u64 access_pio_pec_sop_head_parity_err_cnt(
2689 const struct cntr_entry *entry,
2690 void *context, int vl, int mode, u64 data)
2691{
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694 return dd->send_pio_err_status_cnt[35];
2695}
2696
2697static u64 access_pio_pcc_sop_head_parity_err_cnt(
2698 const struct cntr_entry *entry,
2699 void *context, int vl, int mode, u64 data)
2700{
2701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703 return dd->send_pio_err_status_cnt[34];
2704}
2705
2706static u64 access_pio_last_returned_cnt_parity_err_cnt(
2707 const struct cntr_entry *entry,
2708 void *context, int vl, int mode, u64 data)
2709{
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712 return dd->send_pio_err_status_cnt[33];
2713}
2714
2715static u64 access_pio_current_free_cnt_parity_err_cnt(
2716 const struct cntr_entry *entry,
2717 void *context, int vl, int mode, u64 data)
2718{
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721 return dd->send_pio_err_status_cnt[32];
2722}
2723
2724static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2725 void *context, int vl, int mode,
2726 u64 data)
2727{
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730 return dd->send_pio_err_status_cnt[31];
2731}
2732
2733static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2734 void *context, int vl, int mode,
2735 u64 data)
2736{
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739 return dd->send_pio_err_status_cnt[30];
2740}
2741
2742static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2744 u64 data)
2745{
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748 return dd->send_pio_err_status_cnt[29];
2749}
2750
2751static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2752 const struct cntr_entry *entry,
2753 void *context, int vl, int mode, u64 data)
2754{
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757 return dd->send_pio_err_status_cnt[28];
2758}
2759
2760static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2762 u64 data)
2763{
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766 return dd->send_pio_err_status_cnt[27];
2767}
2768
2769static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2770 void *context, int vl, int mode,
2771 u64 data)
2772{
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775 return dd->send_pio_err_status_cnt[26];
2776}
2777
2778static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl,
2780 int mode, u64 data)
2781{
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784 return dd->send_pio_err_status_cnt[25];
2785}
2786
2787static u64 access_pio_block_qw_count_parity_err_cnt(
2788 const struct cntr_entry *entry,
2789 void *context, int vl, int mode, u64 data)
2790{
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793 return dd->send_pio_err_status_cnt[24];
2794}
2795
2796static u64 access_pio_write_qw_valid_parity_err_cnt(
2797 const struct cntr_entry *entry,
2798 void *context, int vl, int mode, u64 data)
2799{
2800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2801
2802 return dd->send_pio_err_status_cnt[23];
2803}
2804
2805static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2806 void *context, int vl, int mode,
2807 u64 data)
2808{
2809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2810
2811 return dd->send_pio_err_status_cnt[22];
2812}
2813
2814static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2815 void *context, int vl,
2816 int mode, u64 data)
2817{
2818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2819
2820 return dd->send_pio_err_status_cnt[21];
2821}
2822
2823static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2824 void *context, int vl,
2825 int mode, u64 data)
2826{
2827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2828
2829 return dd->send_pio_err_status_cnt[20];
2830}
2831
2832static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2833 void *context, int vl,
2834 int mode, u64 data)
2835{
2836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2837
2838 return dd->send_pio_err_status_cnt[19];
2839}
2840
2841static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2842 const struct cntr_entry *entry,
2843 void *context, int vl, int mode, u64 data)
2844{
2845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2846
2847 return dd->send_pio_err_status_cnt[18];
2848}
2849
2850static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2851 void *context, int vl, int mode,
2852 u64 data)
2853{
2854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2855
2856 return dd->send_pio_err_status_cnt[17];
2857}
2858
2859static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2860 void *context, int vl, int mode,
2861 u64 data)
2862{
2863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2864
2865 return dd->send_pio_err_status_cnt[16];
2866}
2867
2868static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2869 const struct cntr_entry *entry,
2870 void *context, int vl, int mode, u64 data)
2871{
2872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2873
2874 return dd->send_pio_err_status_cnt[15];
2875}
2876
2877static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2878 const struct cntr_entry *entry,
2879 void *context, int vl, int mode, u64 data)
2880{
2881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2882
2883 return dd->send_pio_err_status_cnt[14];
2884}
2885
2886static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2887 const struct cntr_entry *entry,
2888 void *context, int vl, int mode, u64 data)
2889{
2890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2891
2892 return dd->send_pio_err_status_cnt[13];
2893}
2894
2895static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2896 const struct cntr_entry *entry,
2897 void *context, int vl, int mode, u64 data)
2898{
2899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2900
2901 return dd->send_pio_err_status_cnt[12];
2902}
2903
2904static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2905 const struct cntr_entry *entry,
2906 void *context, int vl, int mode, u64 data)
2907{
2908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2909
2910 return dd->send_pio_err_status_cnt[11];
2911}
2912
2913static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2914 const struct cntr_entry *entry,
2915 void *context, int vl, int mode, u64 data)
2916{
2917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2918
2919 return dd->send_pio_err_status_cnt[10];
2920}
2921
2922static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2923 const struct cntr_entry *entry,
2924 void *context, int vl, int mode, u64 data)
2925{
2926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2927
2928 return dd->send_pio_err_status_cnt[9];
2929}
2930
2931static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2932 const struct cntr_entry *entry,
2933 void *context, int vl, int mode, u64 data)
2934{
2935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2936
2937 return dd->send_pio_err_status_cnt[8];
2938}
2939
2940static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2941 const struct cntr_entry *entry,
2942 void *context, int vl, int mode, u64 data)
2943{
2944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2945
2946 return dd->send_pio_err_status_cnt[7];
2947}
2948
2949static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2950 void *context, int vl, int mode,
2951 u64 data)
2952{
2953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2954
2955 return dd->send_pio_err_status_cnt[6];
2956}
2957
2958static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2959 void *context, int vl, int mode,
2960 u64 data)
2961{
2962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2963
2964 return dd->send_pio_err_status_cnt[5];
2965}
2966
2967static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2968 void *context, int vl, int mode,
2969 u64 data)
2970{
2971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2972
2973 return dd->send_pio_err_status_cnt[4];
2974}
2975
2976static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2977 void *context, int vl, int mode,
2978 u64 data)
2979{
2980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2981
2982 return dd->send_pio_err_status_cnt[3];
2983}
2984
2985static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2986 void *context, int vl, int mode,
2987 u64 data)
2988{
2989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2990
2991 return dd->send_pio_err_status_cnt[2];
2992}
2993
2994static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2995 void *context, int vl,
2996 int mode, u64 data)
2997{
2998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2999
3000 return dd->send_pio_err_status_cnt[1];
3001}
3002
3003static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3004 void *context, int vl, int mode,
3005 u64 data)
3006{
3007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3008
3009 return dd->send_pio_err_status_cnt[0];
3010}
3011
3012/*
3013 * Software counters corresponding to each of the
3014 * error status bits within SendDmaErrStatus
3015 */
3016static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3017 const struct cntr_entry *entry,
3018 void *context, int vl, int mode, u64 data)
3019{
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022 return dd->send_dma_err_status_cnt[3];
3023}
3024
3025static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3026 const struct cntr_entry *entry,
3027 void *context, int vl, int mode, u64 data)
3028{
3029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031 return dd->send_dma_err_status_cnt[2];
3032}
3033
3034static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3035 void *context, int vl, int mode,
3036 u64 data)
3037{
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040 return dd->send_dma_err_status_cnt[1];
3041}
3042
3043static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3044 void *context, int vl, int mode,
3045 u64 data)
3046{
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049 return dd->send_dma_err_status_cnt[0];
3050}
3051
3052/*
3053 * Software counters corresponding to each of the
3054 * error status bits within SendEgressErrStatus
3055 */
3056static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3057 const struct cntr_entry *entry,
3058 void *context, int vl, int mode, u64 data)
3059{
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062 return dd->send_egress_err_status_cnt[63];
3063}
3064
3065static u64 access_tx_read_sdma_memory_csr_err_cnt(
3066 const struct cntr_entry *entry,
3067 void *context, int vl, int mode, u64 data)
3068{
3069 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3070
3071 return dd->send_egress_err_status_cnt[62];
3072}
3073
3074static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3075 void *context, int vl, int mode,
3076 u64 data)
3077{
3078 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3079
3080 return dd->send_egress_err_status_cnt[61];
3081}
3082
3083static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3084 void *context, int vl,
3085 int mode, u64 data)
3086{
3087 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3088
3089 return dd->send_egress_err_status_cnt[60];
3090}
3091
3092static u64 access_tx_read_sdma_memory_cor_err_cnt(
3093 const struct cntr_entry *entry,
3094 void *context, int vl, int mode, u64 data)
3095{
3096 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3097
3098 return dd->send_egress_err_status_cnt[59];
3099}
3100
3101static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3102 void *context, int vl, int mode,
3103 u64 data)
3104{
3105 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3106
3107 return dd->send_egress_err_status_cnt[58];
3108}
3109
3110static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3111 void *context, int vl, int mode,
3112 u64 data)
3113{
3114 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3115
3116 return dd->send_egress_err_status_cnt[57];
3117}
3118
3119static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3120 void *context, int vl, int mode,
3121 u64 data)
3122{
3123 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3124
3125 return dd->send_egress_err_status_cnt[56];
3126}
3127
3128static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3129 void *context, int vl, int mode,
3130 u64 data)
3131{
3132 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134 return dd->send_egress_err_status_cnt[55];
3135}
3136
3137static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3138 void *context, int vl, int mode,
3139 u64 data)
3140{
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143 return dd->send_egress_err_status_cnt[54];
3144}
3145
3146static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3148 u64 data)
3149{
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152 return dd->send_egress_err_status_cnt[53];
3153}
3154
3155static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3157 u64 data)
3158{
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161 return dd->send_egress_err_status_cnt[52];
3162}
3163
3164static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3165 void *context, int vl, int mode,
3166 u64 data)
3167{
3168 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3169
3170 return dd->send_egress_err_status_cnt[51];
3171}
3172
3173static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3174 void *context, int vl, int mode,
3175 u64 data)
3176{
3177 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3178
3179 return dd->send_egress_err_status_cnt[50];
3180}
3181
3182static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3183 void *context, int vl, int mode,
3184 u64 data)
3185{
3186 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3187
3188 return dd->send_egress_err_status_cnt[49];
3189}
3190
3191static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3192 void *context, int vl, int mode,
3193 u64 data)
3194{
3195 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3196
3197 return dd->send_egress_err_status_cnt[48];
3198}
3199
3200static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3201 void *context, int vl, int mode,
3202 u64 data)
3203{
3204 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3205
3206 return dd->send_egress_err_status_cnt[47];
3207}
3208
3209static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3210 void *context, int vl, int mode,
3211 u64 data)
3212{
3213 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3214
3215 return dd->send_egress_err_status_cnt[46];
3216}
3217
3218static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3219 void *context, int vl, int mode,
3220 u64 data)
3221{
3222 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3223
3224 return dd->send_egress_err_status_cnt[45];
3225}
3226
3227static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3228 void *context, int vl,
3229 int mode, u64 data)
3230{
3231 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3232
3233 return dd->send_egress_err_status_cnt[44];
3234}
3235
3236static u64 access_tx_read_sdma_memory_unc_err_cnt(
3237 const struct cntr_entry *entry,
3238 void *context, int vl, int mode, u64 data)
3239{
3240 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3241
3242 return dd->send_egress_err_status_cnt[43];
3243}
3244
3245static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3246 void *context, int vl, int mode,
3247 u64 data)
3248{
3249 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3250
3251 return dd->send_egress_err_status_cnt[42];
3252}
3253
3254static u64 access_tx_credit_return_parity_err_cnt(
3255 const struct cntr_entry *entry,
3256 void *context, int vl, int mode, u64 data)
3257{
3258 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3259
3260 return dd->send_egress_err_status_cnt[41];
3261}
3262
3263static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3264 const struct cntr_entry *entry,
3265 void *context, int vl, int mode, u64 data)
3266{
3267 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3268
3269 return dd->send_egress_err_status_cnt[40];
3270}
3271
3272static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3273 const struct cntr_entry *entry,
3274 void *context, int vl, int mode, u64 data)
3275{
3276 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3277
3278 return dd->send_egress_err_status_cnt[39];
3279}
3280
3281static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3282 const struct cntr_entry *entry,
3283 void *context, int vl, int mode, u64 data)
3284{
3285 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3286
3287 return dd->send_egress_err_status_cnt[38];
3288}
3289
3290static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3291 const struct cntr_entry *entry,
3292 void *context, int vl, int mode, u64 data)
3293{
3294 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3295
3296 return dd->send_egress_err_status_cnt[37];
3297}
3298
3299static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3300 const struct cntr_entry *entry,
3301 void *context, int vl, int mode, u64 data)
3302{
3303 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3304
3305 return dd->send_egress_err_status_cnt[36];
3306}
3307
3308static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3309 const struct cntr_entry *entry,
3310 void *context, int vl, int mode, u64 data)
3311{
3312 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3313
3314 return dd->send_egress_err_status_cnt[35];
3315}
3316
3317static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3318 const struct cntr_entry *entry,
3319 void *context, int vl, int mode, u64 data)
3320{
3321 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3322
3323 return dd->send_egress_err_status_cnt[34];
3324}
3325
3326static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3327 const struct cntr_entry *entry,
3328 void *context, int vl, int mode, u64 data)
3329{
3330 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3331
3332 return dd->send_egress_err_status_cnt[33];
3333}
3334
3335static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3336 const struct cntr_entry *entry,
3337 void *context, int vl, int mode, u64 data)
3338{
3339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3340
3341 return dd->send_egress_err_status_cnt[32];
3342}
3343
3344static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3345 const struct cntr_entry *entry,
3346 void *context, int vl, int mode, u64 data)
3347{
3348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3349
3350 return dd->send_egress_err_status_cnt[31];
3351}
3352
3353static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3354 const struct cntr_entry *entry,
3355 void *context, int vl, int mode, u64 data)
3356{
3357 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3358
3359 return dd->send_egress_err_status_cnt[30];
3360}
3361
3362static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3363 const struct cntr_entry *entry,
3364 void *context, int vl, int mode, u64 data)
3365{
3366 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3367
3368 return dd->send_egress_err_status_cnt[29];
3369}
3370
3371static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3372 const struct cntr_entry *entry,
3373 void *context, int vl, int mode, u64 data)
3374{
3375 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3376
3377 return dd->send_egress_err_status_cnt[28];
3378}
3379
3380static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3381 const struct cntr_entry *entry,
3382 void *context, int vl, int mode, u64 data)
3383{
3384 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3385
3386 return dd->send_egress_err_status_cnt[27];
3387}
3388
3389static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3390 const struct cntr_entry *entry,
3391 void *context, int vl, int mode, u64 data)
3392{
3393 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3394
3395 return dd->send_egress_err_status_cnt[26];
3396}
3397
3398static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3399 const struct cntr_entry *entry,
3400 void *context, int vl, int mode, u64 data)
3401{
3402 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3403
3404 return dd->send_egress_err_status_cnt[25];
3405}
3406
3407static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3408 const struct cntr_entry *entry,
3409 void *context, int vl, int mode, u64 data)
3410{
3411 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3412
3413 return dd->send_egress_err_status_cnt[24];
3414}
3415
3416static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3417 const struct cntr_entry *entry,
3418 void *context, int vl, int mode, u64 data)
3419{
3420 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3421
3422 return dd->send_egress_err_status_cnt[23];
3423}
3424
3425static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3426 const struct cntr_entry *entry,
3427 void *context, int vl, int mode, u64 data)
3428{
3429 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3430
3431 return dd->send_egress_err_status_cnt[22];
3432}
3433
3434static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3435 const struct cntr_entry *entry,
3436 void *context, int vl, int mode, u64 data)
3437{
3438 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3439
3440 return dd->send_egress_err_status_cnt[21];
3441}
3442
3443static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3444 const struct cntr_entry *entry,
3445 void *context, int vl, int mode, u64 data)
3446{
3447 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3448
3449 return dd->send_egress_err_status_cnt[20];
3450}
3451
3452static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3453 const struct cntr_entry *entry,
3454 void *context, int vl, int mode, u64 data)
3455{
3456 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3457
3458 return dd->send_egress_err_status_cnt[19];
3459}
3460
3461static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3462 const struct cntr_entry *entry,
3463 void *context, int vl, int mode, u64 data)
3464{
3465 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3466
3467 return dd->send_egress_err_status_cnt[18];
3468}
3469
3470static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3471 const struct cntr_entry *entry,
3472 void *context, int vl, int mode, u64 data)
3473{
3474 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3475
3476 return dd->send_egress_err_status_cnt[17];
3477}
3478
3479static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3480 const struct cntr_entry *entry,
3481 void *context, int vl, int mode, u64 data)
3482{
3483 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3484
3485 return dd->send_egress_err_status_cnt[16];
3486}
3487
3488static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3489 void *context, int vl, int mode,
3490 u64 data)
3491{
3492 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3493
3494 return dd->send_egress_err_status_cnt[15];
3495}
3496
3497static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3498 void *context, int vl,
3499 int mode, u64 data)
3500{
3501 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3502
3503 return dd->send_egress_err_status_cnt[14];
3504}
3505
3506static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3507 void *context, int vl, int mode,
3508 u64 data)
3509{
3510 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3511
3512 return dd->send_egress_err_status_cnt[13];
3513}
3514
3515static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3516 void *context, int vl, int mode,
3517 u64 data)
3518{
3519 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3520
3521 return dd->send_egress_err_status_cnt[12];
3522}
3523
3524static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3525 const struct cntr_entry *entry,
3526 void *context, int vl, int mode, u64 data)
3527{
3528 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3529
3530 return dd->send_egress_err_status_cnt[11];
3531}
3532
3533static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3534 void *context, int vl, int mode,
3535 u64 data)
3536{
3537 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3538
3539 return dd->send_egress_err_status_cnt[10];
3540}
3541
3542static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3543 void *context, int vl, int mode,
3544 u64 data)
3545{
3546 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3547
3548 return dd->send_egress_err_status_cnt[9];
3549}
3550
3551static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3552 const struct cntr_entry *entry,
3553 void *context, int vl, int mode, u64 data)
3554{
3555 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3556
3557 return dd->send_egress_err_status_cnt[8];
3558}
3559
3560static u64 access_tx_pio_launch_intf_parity_err_cnt(
3561 const struct cntr_entry *entry,
3562 void *context, int vl, int mode, u64 data)
3563{
3564 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3565
3566 return dd->send_egress_err_status_cnt[7];
3567}
3568
3569static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3570 void *context, int vl, int mode,
3571 u64 data)
3572{
3573 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3574
3575 return dd->send_egress_err_status_cnt[6];
3576}
3577
3578static u64 access_tx_incorrect_link_state_err_cnt(
3579 const struct cntr_entry *entry,
3580 void *context, int vl, int mode, u64 data)
3581{
3582 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3583
3584 return dd->send_egress_err_status_cnt[5];
3585}
3586
3587static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3588 void *context, int vl, int mode,
3589 u64 data)
3590{
3591 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3592
3593 return dd->send_egress_err_status_cnt[4];
3594}
3595
3596static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3597 const struct cntr_entry *entry,
3598 void *context, int vl, int mode, u64 data)
3599{
3600 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3601
3602 return dd->send_egress_err_status_cnt[3];
3603}
3604
3605static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3606 void *context, int vl, int mode,
3607 u64 data)
3608{
3609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3610
3611 return dd->send_egress_err_status_cnt[2];
3612}
3613
3614static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3615 const struct cntr_entry *entry,
3616 void *context, int vl, int mode, u64 data)
3617{
3618 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3619
3620 return dd->send_egress_err_status_cnt[1];
3621}
3622
3623static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3624 const struct cntr_entry *entry,
3625 void *context, int vl, int mode, u64 data)
3626{
3627 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3628
3629 return dd->send_egress_err_status_cnt[0];
3630}
3631
3632/*
3633 * Software counters corresponding to each of the
3634 * error status bits within SendErrStatus
3635 */
3636static u64 access_send_csr_write_bad_addr_err_cnt(
3637 const struct cntr_entry *entry,
3638 void *context, int vl, int mode, u64 data)
3639{
3640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642 return dd->send_err_status_cnt[2];
3643}
3644
3645static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3646 void *context, int vl,
3647 int mode, u64 data)
3648{
3649 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651 return dd->send_err_status_cnt[1];
3652}
3653
3654static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3655 void *context, int vl, int mode,
3656 u64 data)
3657{
3658 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660 return dd->send_err_status_cnt[0];
3661}
3662
3663/*
3664 * Software counters corresponding to each of the
3665 * error status bits within SendCtxtErrStatus
3666 */
3667static u64 access_pio_write_out_of_bounds_err_cnt(
3668 const struct cntr_entry *entry,
3669 void *context, int vl, int mode, u64 data)
3670{
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673 return dd->sw_ctxt_err_status_cnt[4];
3674}
3675
3676static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3677 void *context, int vl, int mode,
3678 u64 data)
3679{
3680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3681
3682 return dd->sw_ctxt_err_status_cnt[3];
3683}
3684
3685static u64 access_pio_write_crosses_boundary_err_cnt(
3686 const struct cntr_entry *entry,
3687 void *context, int vl, int mode, u64 data)
3688{
3689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3690
3691 return dd->sw_ctxt_err_status_cnt[2];
3692}
3693
3694static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3695 void *context, int vl,
3696 int mode, u64 data)
3697{
3698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3699
3700 return dd->sw_ctxt_err_status_cnt[1];
3701}
3702
3703static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3704 void *context, int vl, int mode,
3705 u64 data)
3706{
3707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3708
3709 return dd->sw_ctxt_err_status_cnt[0];
3710}
3711
3712/*
3713 * Software counters corresponding to each of the
3714 * error status bits within SendDmaEngErrStatus
3715 */
3716static u64 access_sdma_header_request_fifo_cor_err_cnt(
3717 const struct cntr_entry *entry,
3718 void *context, int vl, int mode, u64 data)
3719{
3720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722 return dd->sw_send_dma_eng_err_status_cnt[23];
3723}
3724
3725static u64 access_sdma_header_storage_cor_err_cnt(
3726 const struct cntr_entry *entry,
3727 void *context, int vl, int mode, u64 data)
3728{
3729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3730
3731 return dd->sw_send_dma_eng_err_status_cnt[22];
3732}
3733
3734static u64 access_sdma_packet_tracking_cor_err_cnt(
3735 const struct cntr_entry *entry,
3736 void *context, int vl, int mode, u64 data)
3737{
3738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3739
3740 return dd->sw_send_dma_eng_err_status_cnt[21];
3741}
3742
3743static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3744 void *context, int vl, int mode,
3745 u64 data)
3746{
3747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3748
3749 return dd->sw_send_dma_eng_err_status_cnt[20];
3750}
3751
3752static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3753 void *context, int vl, int mode,
3754 u64 data)
3755{
3756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3757
3758 return dd->sw_send_dma_eng_err_status_cnt[19];
3759}
3760
3761static u64 access_sdma_header_request_fifo_unc_err_cnt(
3762 const struct cntr_entry *entry,
3763 void *context, int vl, int mode, u64 data)
3764{
3765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3766
3767 return dd->sw_send_dma_eng_err_status_cnt[18];
3768}
3769
3770static u64 access_sdma_header_storage_unc_err_cnt(
3771 const struct cntr_entry *entry,
3772 void *context, int vl, int mode, u64 data)
3773{
3774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3775
3776 return dd->sw_send_dma_eng_err_status_cnt[17];
3777}
3778
3779static u64 access_sdma_packet_tracking_unc_err_cnt(
3780 const struct cntr_entry *entry,
3781 void *context, int vl, int mode, u64 data)
3782{
3783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785 return dd->sw_send_dma_eng_err_status_cnt[16];
3786}
3787
3788static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3789 void *context, int vl, int mode,
3790 u64 data)
3791{
3792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794 return dd->sw_send_dma_eng_err_status_cnt[15];
3795}
3796
3797static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3798 void *context, int vl, int mode,
3799 u64 data)
3800{
3801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803 return dd->sw_send_dma_eng_err_status_cnt[14];
3804}
3805
3806static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3807 void *context, int vl, int mode,
3808 u64 data)
3809{
3810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812 return dd->sw_send_dma_eng_err_status_cnt[13];
3813}
3814
3815static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3816 void *context, int vl, int mode,
3817 u64 data)
3818{
3819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821 return dd->sw_send_dma_eng_err_status_cnt[12];
3822}
3823
3824static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3825 void *context, int vl, int mode,
3826 u64 data)
3827{
3828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3829
3830 return dd->sw_send_dma_eng_err_status_cnt[11];
3831}
3832
3833static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3834 void *context, int vl, int mode,
3835 u64 data)
3836{
3837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3838
3839 return dd->sw_send_dma_eng_err_status_cnt[10];
3840}
3841
3842static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3843 void *context, int vl, int mode,
3844 u64 data)
3845{
3846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3847
3848 return dd->sw_send_dma_eng_err_status_cnt[9];
3849}
3850
3851static u64 access_sdma_packet_desc_overflow_err_cnt(
3852 const struct cntr_entry *entry,
3853 void *context, int vl, int mode, u64 data)
3854{
3855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3856
3857 return dd->sw_send_dma_eng_err_status_cnt[8];
3858}
3859
3860static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3861 void *context, int vl,
3862 int mode, u64 data)
3863{
3864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3865
3866 return dd->sw_send_dma_eng_err_status_cnt[7];
3867}
3868
3869static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3870 void *context, int vl, int mode, u64 data)
3871{
3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3873
3874 return dd->sw_send_dma_eng_err_status_cnt[6];
3875}
3876
3877static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3878 void *context, int vl, int mode,
3879 u64 data)
3880{
3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3882
3883 return dd->sw_send_dma_eng_err_status_cnt[5];
3884}
3885
3886static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3887 void *context, int vl, int mode,
3888 u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[4];
3893}
3894
3895static u64 access_sdma_tail_out_of_bounds_err_cnt(
3896 const struct cntr_entry *entry,
3897 void *context, int vl, int mode, u64 data)
3898{
3899 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3900
3901 return dd->sw_send_dma_eng_err_status_cnt[3];
3902}
3903
3904static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3905 void *context, int vl, int mode,
3906 u64 data)
3907{
3908 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3909
3910 return dd->sw_send_dma_eng_err_status_cnt[2];
3911}
3912
3913static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3914 void *context, int vl, int mode,
3915 u64 data)
3916{
3917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3918
3919 return dd->sw_send_dma_eng_err_status_cnt[1];
3920}
3921
3922static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3923 void *context, int vl, int mode,
3924 u64 data)
3925{
3926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3927
3928 return dd->sw_send_dma_eng_err_status_cnt[0];
3929}
3930
3931#define def_access_sw_cpu(cntr) \
3932static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3933 void *context, int vl, int mode, u64 data) \
3934{ \
3935 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3936 return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr, \
3937 ppd->ibport_data.cntr, vl, \
3938 mode, data); \
3939}
3940
3941def_access_sw_cpu(rc_acks);
3942def_access_sw_cpu(rc_qacks);
3943def_access_sw_cpu(rc_delayed_comp);
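/*
 * For reference, a sketch of what def_access_sw_cpu(rc_acks) above expands
 * to; this is derived mechanically from the macro and uses only names that
 * already appear in it:
 *
 * static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				    void *context, int vl, int mode, u64 data)
 * {
 *	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *	return read_write_cpu(ppd->dd, &ppd->ibport_data.z_rc_acks,
 *			      ppd->ibport_data.rc_acks, vl, mode, data);
 * }
 */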
3944
3945#define def_access_ibp_counter(cntr) \
3946static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3947 void *context, int vl, int mode, u64 data) \
3948{ \
3949 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3950 \
3951 if (vl != CNTR_INVALID_VL) \
3952 return 0; \
3953 \
3954 return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr, \
3955 mode, data); \
3956}
3957
3958def_access_ibp_counter(loop_pkts);
3959def_access_ibp_counter(rc_resends);
3960def_access_ibp_counter(rnr_naks);
3961def_access_ibp_counter(other_naks);
3962def_access_ibp_counter(rc_timeouts);
3963def_access_ibp_counter(pkt_drops);
3964def_access_ibp_counter(dmawait);
3965def_access_ibp_counter(rc_seqnak);
3966def_access_ibp_counter(rc_dupreq);
3967def_access_ibp_counter(rdma_seq);
3968def_access_ibp_counter(unaligned);
3969def_access_ibp_counter(seq_naks);
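/*
 * Likewise, def_access_ibp_counter(loop_pkts) above expands to the helper
 * below; these counters are per-port only, so any per-VL query returns 0:
 *
 * static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *				   void *context, int vl, int mode, u64 data)
 * {
 *	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *	if (vl != CNTR_INVALID_VL)
 *		return 0;
 *
 *	return read_write_sw(ppd->dd, &ppd->ibport_data.n_loop_pkts,
 *			     mode, data);
 * }
 */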
3970
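/*
 * Each dev_cntrs[] entry ties a device counter index to a name, an optional
 * CSR, a set of CNTR_* flags (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, CNTR_32BIT,
 * CNTR_SDMA, ...) and an access callback such as the access_*_err_cnt()
 * helpers defined above.  A hedged sketch of how one entry might be read;
 * the callback member name (rw_cntr) and the read mode value (CNTR_MODE_R)
 * are assumptions here, not taken from this table:
 *
 *	const struct cntr_entry *e = &dev_cntrs[C_TX_CREDIT_OVERRUN_ERR];
 *	u64 val = e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */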
3971static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3972[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3973[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3974 CNTR_NORMAL),
3975[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3976 CNTR_NORMAL),
3977[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3978 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3979 CNTR_NORMAL),
3980[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3981 CNTR_NORMAL),
3982[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3983 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3984[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3985 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3986[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3987 CNTR_NORMAL),
3988[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3989 CNTR_NORMAL),
3990[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3991 CNTR_NORMAL),
3992[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3993 CNTR_NORMAL),
3994[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3995 CNTR_NORMAL),
3996[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3997 CNTR_NORMAL),
3998[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
3999 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4000[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4001 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4002[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4003 CNTR_SYNTH),
4004[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4005[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4006 CNTR_SYNTH),
4007[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4008 CNTR_SYNTH),
4009[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4010 CNTR_SYNTH),
4011[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4012 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4013[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4014 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4015 CNTR_SYNTH),
4016[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4017 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4018[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4019 CNTR_SYNTH),
4020[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4021 CNTR_SYNTH),
4022[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4023 CNTR_SYNTH),
4024[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4025 CNTR_SYNTH),
4026[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4027 CNTR_SYNTH),
4028[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4029 CNTR_SYNTH),
4030[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4031 CNTR_SYNTH),
4032[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4033 CNTR_SYNTH | CNTR_VL),
4034[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4035 CNTR_SYNTH | CNTR_VL),
4036[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4037[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4038 CNTR_SYNTH | CNTR_VL),
4039[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4040[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4041 CNTR_SYNTH | CNTR_VL),
4042[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4043 CNTR_SYNTH),
4044[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4045 CNTR_SYNTH | CNTR_VL),
4046[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4047 CNTR_SYNTH),
4048[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4049 CNTR_SYNTH | CNTR_VL),
4050[C_DC_TOTAL_CRC] =
4051 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4052 CNTR_SYNTH),
4053[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4054 CNTR_SYNTH),
4055[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4056 CNTR_SYNTH),
4057[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4058 CNTR_SYNTH),
4059[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4060 CNTR_SYNTH),
4061[C_DC_CRC_MULT_LN] =
4062 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4063 CNTR_SYNTH),
4064[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4065 CNTR_SYNTH),
4066[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4067 CNTR_SYNTH),
4068[C_DC_SEQ_CRC_CNT] =
4069 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4070 CNTR_SYNTH),
4071[C_DC_ESC0_ONLY_CNT] =
4072 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4073 CNTR_SYNTH),
4074[C_DC_ESC0_PLUS1_CNT] =
4075 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4076 CNTR_SYNTH),
4077[C_DC_ESC0_PLUS2_CNT] =
4078 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4079 CNTR_SYNTH),
4080[C_DC_REINIT_FROM_PEER_CNT] =
4081 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4082 CNTR_SYNTH),
4083[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4084 CNTR_SYNTH),
4085[C_DC_MISC_FLG_CNT] =
4086 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4087 CNTR_SYNTH),
4088[C_DC_PRF_GOOD_LTP_CNT] =
4089 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4090[C_DC_PRF_ACCEPTED_LTP_CNT] =
4091 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4092 CNTR_SYNTH),
4093[C_DC_PRF_RX_FLIT_CNT] =
4094 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4095[C_DC_PRF_TX_FLIT_CNT] =
4096 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4097[C_DC_PRF_CLK_CNTR] =
4098 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4099[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4100 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4101[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4102 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4103 CNTR_SYNTH),
4104[C_DC_PG_STS_TX_SBE_CNT] =
4105 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4106[C_DC_PG_STS_TX_MBE_CNT] =
4107 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4108 CNTR_SYNTH),
4109[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4110 access_sw_cpu_intr),
4111[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4112 access_sw_cpu_rcv_limit),
4113[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4114 access_sw_vtx_wait),
4115[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4116 access_sw_pio_wait),
4117[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4118 access_sw_kmem_wait),
4119[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4120 access_sw_send_schedule),
4121[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4122 SEND_DMA_DESC_FETCHED_CNT, 0,
4123 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4124 dev_access_u32_csr),
4125[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4126 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4127 access_sde_int_cnt),
4128[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4129 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4130 access_sde_err_cnt),
4131[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4132 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4133 access_sde_idle_int_cnt),
4134[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4135 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4136 access_sde_progress_int_cnt),
4137/* MISC_ERR_STATUS */
4138[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4139 CNTR_NORMAL,
4140 access_misc_pll_lock_fail_err_cnt),
4141[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4142 CNTR_NORMAL,
4143 access_misc_mbist_fail_err_cnt),
4144[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4145 CNTR_NORMAL,
4146 access_misc_invalid_eep_cmd_err_cnt),
4147[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4148 CNTR_NORMAL,
4149 access_misc_efuse_done_parity_err_cnt),
4150[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4151 CNTR_NORMAL,
4152 access_misc_efuse_write_err_cnt),
4153[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4154 0, CNTR_NORMAL,
4155 access_misc_efuse_read_bad_addr_err_cnt),
4156[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4157 CNTR_NORMAL,
4158 access_misc_efuse_csr_parity_err_cnt),
4159[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4160 CNTR_NORMAL,
4161 access_misc_fw_auth_failed_err_cnt),
4162[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4163 CNTR_NORMAL,
4164 access_misc_key_mismatch_err_cnt),
4165[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4166 CNTR_NORMAL,
4167 access_misc_sbus_write_failed_err_cnt),
4168[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4169 CNTR_NORMAL,
4170 access_misc_csr_write_bad_addr_err_cnt),
4171[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4172 CNTR_NORMAL,
4173 access_misc_csr_read_bad_addr_err_cnt),
4174[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4175 CNTR_NORMAL,
4176 access_misc_csr_parity_err_cnt),
4177/* CceErrStatus */
4178[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4179 CNTR_NORMAL,
4180 access_sw_cce_err_status_aggregated_cnt),
4181[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4182 CNTR_NORMAL,
4183 access_cce_msix_csr_parity_err_cnt),
4184[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4185 CNTR_NORMAL,
4186 access_cce_int_map_unc_err_cnt),
4187[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4188 CNTR_NORMAL,
4189 access_cce_int_map_cor_err_cnt),
4190[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4191 CNTR_NORMAL,
4192 access_cce_msix_table_unc_err_cnt),
4193[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4194 CNTR_NORMAL,
4195 access_cce_msix_table_cor_err_cnt),
4196[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4197 0, CNTR_NORMAL,
4198 access_cce_rxdma_conv_fifo_parity_err_cnt),
4199[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4200 0, CNTR_NORMAL,
4201 access_cce_rcpl_async_fifo_parity_err_cnt),
4202[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4203 CNTR_NORMAL,
4204 access_cce_seg_write_bad_addr_err_cnt),
4205[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4206 CNTR_NORMAL,
4207 access_cce_seg_read_bad_addr_err_cnt),
4208[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4209 CNTR_NORMAL,
4210 access_la_triggered_cnt),
4211[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4212 CNTR_NORMAL,
4213 access_cce_trgt_cpl_timeout_err_cnt),
4214[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4215 CNTR_NORMAL,
4216 access_pcic_receive_parity_err_cnt),
4217[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4218 CNTR_NORMAL,
4219 access_pcic_transmit_back_parity_err_cnt),
4220[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4221 0, CNTR_NORMAL,
4222 access_pcic_transmit_front_parity_err_cnt),
4223[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4224 CNTR_NORMAL,
4225 access_pcic_cpl_dat_q_unc_err_cnt),
4226[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4227 CNTR_NORMAL,
4228 access_pcic_cpl_hd_q_unc_err_cnt),
4229[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4230 CNTR_NORMAL,
4231 access_pcic_post_dat_q_unc_err_cnt),
4232[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_pcic_post_hd_q_unc_err_cnt),
4235[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_pcic_retry_sot_mem_unc_err_cnt),
4238[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_pcic_retry_mem_unc_err),
4241[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4242 CNTR_NORMAL,
4243 access_pcic_n_post_dat_q_parity_err_cnt),
4244[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4245 CNTR_NORMAL,
4246 access_pcic_n_post_h_q_parity_err_cnt),
4247[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4248 CNTR_NORMAL,
4249 access_pcic_cpl_dat_q_cor_err_cnt),
4250[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4251 CNTR_NORMAL,
4252 access_pcic_cpl_hd_q_cor_err_cnt),
4253[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_pcic_post_dat_q_cor_err_cnt),
4256[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4257 CNTR_NORMAL,
4258 access_pcic_post_hd_q_cor_err_cnt),
4259[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4260 CNTR_NORMAL,
4261 access_pcic_retry_sot_mem_cor_err_cnt),
4262[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_pcic_retry_mem_cor_err_cnt),
4265[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4266 "CceCli1AsyncFifoDbgParityError", 0, 0,
4267 CNTR_NORMAL,
4268 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4269[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4270 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4271 CNTR_NORMAL,
4272 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4273 ),
4274[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4275 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4278[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4279 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4282[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4283 0, CNTR_NORMAL,
4284 access_cce_cli2_async_fifo_parity_err_cnt),
4285[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4286 CNTR_NORMAL,
4287 access_cce_csr_cfg_bus_parity_err_cnt),
4288[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4289 0, CNTR_NORMAL,
4290 access_cce_cli0_async_fifo_parity_err_cnt),
4291[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4292 CNTR_NORMAL,
4293 access_cce_rspd_data_parity_err_cnt),
4294[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4295 CNTR_NORMAL,
4296 access_cce_trgt_access_err_cnt),
4297[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4298 0, CNTR_NORMAL,
4299 access_cce_trgt_async_fifo_parity_err_cnt),
4300[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_cce_csr_write_bad_addr_err_cnt),
4303[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4304 CNTR_NORMAL,
4305 access_cce_csr_read_bad_addr_err_cnt),
4306[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_ccs_csr_parity_err_cnt),
4309
4310/* RcvErrStatus */
4311[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4312 CNTR_NORMAL,
4313 access_rx_csr_parity_err_cnt),
4314[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4315 CNTR_NORMAL,
4316 access_rx_csr_write_bad_addr_err_cnt),
4317[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4318 CNTR_NORMAL,
4319 access_rx_csr_read_bad_addr_err_cnt),
4320[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4321 CNTR_NORMAL,
4322 access_rx_dma_csr_unc_err_cnt),
4323[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4324 CNTR_NORMAL,
4325 access_rx_dma_dq_fsm_encoding_err_cnt),
4326[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_rx_dma_eq_fsm_encoding_err_cnt),
4329[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4330 CNTR_NORMAL,
4331 access_rx_dma_csr_parity_err_cnt),
4332[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_rx_rbuf_data_cor_err_cnt),
4335[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4336 CNTR_NORMAL,
4337 access_rx_rbuf_data_unc_err_cnt),
4338[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_rx_dma_data_fifo_rd_cor_err_cnt),
4341[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4342 CNTR_NORMAL,
4343 access_rx_dma_data_fifo_rd_unc_err_cnt),
4344[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4347[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4350[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4351 CNTR_NORMAL,
4352 access_rx_rbuf_desc_part2_cor_err_cnt),
4353[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_rx_rbuf_desc_part2_unc_err_cnt),
4356[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4357 CNTR_NORMAL,
4358 access_rx_rbuf_desc_part1_cor_err_cnt),
4359[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_rx_rbuf_desc_part1_unc_err_cnt),
4362[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_hq_intr_fsm_err_cnt),
4365[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_hq_intr_csr_parity_err_cnt),
4368[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_lookup_csr_parity_err_cnt),
4371[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_lookup_rcv_array_cor_err_cnt),
4374[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_lookup_rcv_array_unc_err_cnt),
4377[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4378 0, CNTR_NORMAL,
4379 access_rx_lookup_des_part2_parity_err_cnt),
4380[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4381 0, CNTR_NORMAL,
4382 access_rx_lookup_des_part1_unc_cor_err_cnt),
4383[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_lookup_des_part1_unc_err_cnt),
4386[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_rbuf_next_free_buf_cor_err_cnt),
4389[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_rbuf_next_free_buf_unc_err_cnt),
4392[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4393 "RxRbufFlInitWrAddrParityErr", 0, 0,
4394 CNTR_NORMAL,
4395 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4396[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4397 0, CNTR_NORMAL,
4398 access_rx_rbuf_fl_initdone_parity_err_cnt),
4399[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4400 0, CNTR_NORMAL,
4401 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4402[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4405[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4406 CNTR_NORMAL,
4407 access_rx_rbuf_empty_err_cnt),
4408[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4409 CNTR_NORMAL,
4410 access_rx_rbuf_full_err_cnt),
4411[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4412 CNTR_NORMAL,
4413 access_rbuf_bad_lookup_err_cnt),
4414[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4415 CNTR_NORMAL,
4416 access_rbuf_ctx_id_parity_err_cnt),
4417[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4418 CNTR_NORMAL,
4419 access_rbuf_csr_qeopdw_parity_err_cnt),
4420[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4421 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4424[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4425 "RxRbufCsrQTlPtrParityErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4428[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4429 0, CNTR_NORMAL,
4430 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4431[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4432 0, CNTR_NORMAL,
4433 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4434[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4435 0, 0, CNTR_NORMAL,
4436 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4437[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4438 0, CNTR_NORMAL,
4439 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4440[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4441 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4442 CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4444[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4445 0, CNTR_NORMAL,
4446 access_rx_rbuf_block_list_read_cor_err_cnt),
4447[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4448 0, CNTR_NORMAL,
4449 access_rx_rbuf_block_list_read_unc_err_cnt),
4450[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4451 CNTR_NORMAL,
4452 access_rx_rbuf_lookup_des_cor_err_cnt),
4453[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4454 CNTR_NORMAL,
4455 access_rx_rbuf_lookup_des_unc_err_cnt),
4456[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4457 "RxRbufLookupDesRegUncCorErr", 0, 0,
4458 CNTR_NORMAL,
4459 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4460[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4461 CNTR_NORMAL,
4462 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4463[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_free_list_cor_err_cnt),
4466[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_rbuf_free_list_unc_err_cnt),
4469[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4470 CNTR_NORMAL,
4471 access_rx_rcv_fsm_encoding_err_cnt),
4472[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_dma_flag_cor_err_cnt),
4475[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4476 CNTR_NORMAL,
4477 access_rx_dma_flag_unc_err_cnt),
4478[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_dc_sop_eop_parity_err_cnt),
4481[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4482 CNTR_NORMAL,
4483 access_rx_rcv_csr_parity_err_cnt),
4484[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4485 CNTR_NORMAL,
4486 access_rx_rcv_qp_map_table_cor_err_cnt),
4487[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4488 CNTR_NORMAL,
4489 access_rx_rcv_qp_map_table_unc_err_cnt),
4490[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4491 CNTR_NORMAL,
4492 access_rx_rcv_data_cor_err_cnt),
4493[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4494 CNTR_NORMAL,
4495 access_rx_rcv_data_unc_err_cnt),
4496[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4497 CNTR_NORMAL,
4498 access_rx_rcv_hdr_cor_err_cnt),
4499[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4500 CNTR_NORMAL,
4501 access_rx_rcv_hdr_unc_err_cnt),
4502[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4503 CNTR_NORMAL,
4504 access_rx_dc_intf_parity_err_cnt),
4505[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_dma_csr_cor_err_cnt),
4508/* SendPioErrStatus */
4509[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_pio_pec_sop_head_parity_err_cnt),
4512[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_pio_pcc_sop_head_parity_err_cnt),
4515[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4516 0, 0, CNTR_NORMAL,
4517 access_pio_last_returned_cnt_parity_err_cnt),
4518[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4519 0, CNTR_NORMAL,
4520 access_pio_current_free_cnt_parity_err_cnt),
4521[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4522 CNTR_NORMAL,
4523 access_pio_reserved_31_err_cnt),
4524[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4525 CNTR_NORMAL,
4526 access_pio_reserved_30_err_cnt),
4527[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4528 CNTR_NORMAL,
4529 access_pio_ppmc_sop_len_err_cnt),
4530[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4531 CNTR_NORMAL,
4532 access_pio_ppmc_bqc_mem_parity_err_cnt),
4533[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4534 CNTR_NORMAL,
4535 access_pio_vl_fifo_parity_err_cnt),
4536[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4537 CNTR_NORMAL,
4538 access_pio_vlf_sop_parity_err_cnt),
4539[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4540 CNTR_NORMAL,
4541 access_pio_vlf_v1_len_parity_err_cnt),
4542[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4543 CNTR_NORMAL,
4544 access_pio_block_qw_count_parity_err_cnt),
4545[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4546 CNTR_NORMAL,
4547 access_pio_write_qw_valid_parity_err_cnt),
4548[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4549 CNTR_NORMAL,
4550 access_pio_state_machine_err_cnt),
4551[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4552 CNTR_NORMAL,
4553 access_pio_write_data_parity_err_cnt),
4554[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4555 CNTR_NORMAL,
4556 access_pio_host_addr_mem_cor_err_cnt),
4557[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4558 CNTR_NORMAL,
4559 access_pio_host_addr_mem_unc_err_cnt),
4560[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4563[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_pio_init_sm_in_err_cnt),
4566[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4567 CNTR_NORMAL,
4568 access_pio_ppmc_pbl_fifo_err_cnt),
4569[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4570 0, CNTR_NORMAL,
4571 access_pio_credit_ret_fifo_parity_err_cnt),
4572[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_v1_len_mem_bank1_cor_err_cnt),
4575[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_v1_len_mem_bank0_cor_err_cnt),
4578[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_v1_len_mem_bank1_unc_err_cnt),
4581[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_v1_len_mem_bank0_unc_err_cnt),
4584[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_sm_pkt_reset_parity_err_cnt),
4587[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_pkt_evict_fifo_parity_err_cnt),
4590[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4591 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4594[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_sbrdctl_crrel_parity_err_cnt),
4597[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_pec_fifo_parity_err_cnt),
4600[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_pcc_fifo_parity_err_cnt),
4603[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4604 CNTR_NORMAL,
4605 access_pio_sb_mem_fifo1_err_cnt),
4606[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4607 CNTR_NORMAL,
4608 access_pio_sb_mem_fifo0_err_cnt),
4609[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4610 CNTR_NORMAL,
4611 access_pio_csr_parity_err_cnt),
4612[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4613 CNTR_NORMAL,
4614 access_pio_write_addr_parity_err_cnt),
4615[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4616 CNTR_NORMAL,
4617 access_pio_write_bad_ctxt_err_cnt),
4618/* SendDmaErrStatus */
4619[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4620 0, CNTR_NORMAL,
4621 access_sdma_pcie_req_tracking_cor_err_cnt),
4622[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4623 0, CNTR_NORMAL,
4624 access_sdma_pcie_req_tracking_unc_err_cnt),
4625[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_sdma_csr_parity_err_cnt),
4628[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_sdma_rpy_tag_err_cnt),
4631/* SendEgressErrStatus */
4632[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_tx_read_pio_memory_csr_unc_err_cnt),
4635[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4636 0, CNTR_NORMAL,
4637 access_tx_read_sdma_memory_csr_err_cnt),
4638[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_tx_egress_fifo_cor_err_cnt),
4641[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_tx_read_pio_memory_cor_err_cnt),
4644[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_tx_read_sdma_memory_cor_err_cnt),
4647[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_tx_sb_hdr_cor_err_cnt),
4650[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_tx_credit_overrun_err_cnt),
4653[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_tx_launch_fifo8_cor_err_cnt),
4656[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4657 CNTR_NORMAL,
4658 access_tx_launch_fifo7_cor_err_cnt),
4659[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_tx_launch_fifo6_cor_err_cnt),
4662[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_tx_launch_fifo5_cor_err_cnt),
4665[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_tx_launch_fifo4_cor_err_cnt),
4668[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_tx_launch_fifo3_cor_err_cnt),
4671[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_tx_launch_fifo2_cor_err_cnt),
4674[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4675 CNTR_NORMAL,
4676 access_tx_launch_fifo1_cor_err_cnt),
4677[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4678 CNTR_NORMAL,
4679 access_tx_launch_fifo0_cor_err_cnt),
4680[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4681 CNTR_NORMAL,
4682 access_tx_credit_return_vl_err_cnt),
4683[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_tx_hcrc_insertion_err_cnt),
4686[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4687 CNTR_NORMAL,
4688 access_tx_egress_fifo_unc_err_cnt),
4689[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_read_pio_memory_unc_err_cnt),
4692[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_read_sdma_memory_unc_err_cnt),
4695[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_sb_hdr_unc_err_cnt),
4698[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4699 CNTR_NORMAL,
4700			    access_tx_credit_return_parity_err_cnt),
4701[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4702 0, 0, CNTR_NORMAL,
4703 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4704[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4705 0, 0, CNTR_NORMAL,
4706 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4707[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4708 0, 0, CNTR_NORMAL,
4709 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4710[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4711 0, 0, CNTR_NORMAL,
4712 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4713[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4714 0, 0, CNTR_NORMAL,
4715 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4716[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4717 0, 0, CNTR_NORMAL,
4718 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4719[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4720 0, 0, CNTR_NORMAL,
4721 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4722[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4723 0, 0, CNTR_NORMAL,
4724 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4725[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4726 0, 0, CNTR_NORMAL,
4727 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4728[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4729 0, 0, CNTR_NORMAL,
4730 access_tx_sdma15_disallowed_packet_err_cnt),
4731[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4732 0, 0, CNTR_NORMAL,
4733 access_tx_sdma14_disallowed_packet_err_cnt),
4734[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4735 0, 0, CNTR_NORMAL,
4736 access_tx_sdma13_disallowed_packet_err_cnt),
4737[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4738 0, 0, CNTR_NORMAL,
4739 access_tx_sdma12_disallowed_packet_err_cnt),
4740[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4741 0, 0, CNTR_NORMAL,
4742 access_tx_sdma11_disallowed_packet_err_cnt),
4743[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4744 0, 0, CNTR_NORMAL,
4745 access_tx_sdma10_disallowed_packet_err_cnt),
4746[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4747 0, 0, CNTR_NORMAL,
4748 access_tx_sdma9_disallowed_packet_err_cnt),
4749[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4750 0, 0, CNTR_NORMAL,
4751 access_tx_sdma8_disallowed_packet_err_cnt),
4752[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_sdma7_disallowed_packet_err_cnt),
4755[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_sdma6_disallowed_packet_err_cnt),
4758[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_sdma5_disallowed_packet_err_cnt),
4761[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_sdma4_disallowed_packet_err_cnt),
4764[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_sdma3_disallowed_packet_err_cnt),
4767[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_sdma2_disallowed_packet_err_cnt),
4770[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_sdma1_disallowed_packet_err_cnt),
4773[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_sdma0_disallowed_packet_err_cnt),
4776[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4777 CNTR_NORMAL,
4778 access_tx_config_parity_err_cnt),
4779[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4780 CNTR_NORMAL,
4781 access_tx_sbrd_ctl_csr_parity_err_cnt),
4782[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4783 CNTR_NORMAL,
4784 access_tx_launch_csr_parity_err_cnt),
4785[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4786 CNTR_NORMAL,
4787 access_tx_illegal_vl_err_cnt),
4788[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4789 "TxSbrdCtlStateMachineParityErr", 0, 0,
4790 CNTR_NORMAL,
4791 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4792[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4793 CNTR_NORMAL,
4794 access_egress_reserved_10_err_cnt),
4795[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4796 CNTR_NORMAL,
4797 access_egress_reserved_9_err_cnt),
4798[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4799 0, 0, CNTR_NORMAL,
4800 access_tx_sdma_launch_intf_parity_err_cnt),
4801[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4802 CNTR_NORMAL,
4803 access_tx_pio_launch_intf_parity_err_cnt),
4804[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4805 CNTR_NORMAL,
4806 access_egress_reserved_6_err_cnt),
4807[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4808 CNTR_NORMAL,
4809 access_tx_incorrect_link_state_err_cnt),
4810[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4811 CNTR_NORMAL,
4812 access_tx_linkdown_err_cnt),
4813[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4814 "EgressFifoUnderrunOrParityErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4817[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_2_err_cnt),
4820[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4821 CNTR_NORMAL,
4822 access_tx_pkt_integrity_mem_unc_err_cnt),
4823[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_pkt_integrity_mem_cor_err_cnt),
4826/* SendErrStatus */
4827[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_send_csr_write_bad_addr_err_cnt),
4830[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_send_csr_read_bad_addr_err_cnt),
4833[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4834 CNTR_NORMAL,
4835 access_send_csr_parity_cnt),
4836/* SendCtxtErrStatus */
4837[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4838 CNTR_NORMAL,
4839 access_pio_write_out_of_bounds_err_cnt),
4840[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_pio_write_overflow_err_cnt),
4843[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4844 0, 0, CNTR_NORMAL,
4845 access_pio_write_crosses_boundary_err_cnt),
4846[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4847 CNTR_NORMAL,
4848 access_pio_disallowed_packet_err_cnt),
4849[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4850 CNTR_NORMAL,
4851 access_pio_inconsistent_sop_err_cnt),
4852/* SendDmaEngErrStatus */
4853[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4854 0, 0, CNTR_NORMAL,
4855 access_sdma_header_request_fifo_cor_err_cnt),
4856[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4857 CNTR_NORMAL,
4858 access_sdma_header_storage_cor_err_cnt),
4859[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_sdma_packet_tracking_cor_err_cnt),
4862[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_sdma_assembly_cor_err_cnt),
4865[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4866 CNTR_NORMAL,
4867 access_sdma_desc_table_cor_err_cnt),
4868[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4869 0, 0, CNTR_NORMAL,
4870 access_sdma_header_request_fifo_unc_err_cnt),
4871[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_sdma_header_storage_unc_err_cnt),
4874[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_sdma_packet_tracking_unc_err_cnt),
4877[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4878 CNTR_NORMAL,
4879 access_sdma_assembly_unc_err_cnt),
4880[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_sdma_desc_table_unc_err_cnt),
4883[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4884 CNTR_NORMAL,
4885 access_sdma_timeout_err_cnt),
4886[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_sdma_header_length_err_cnt),
4889[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4890 CNTR_NORMAL,
4891 access_sdma_header_address_err_cnt),
4892[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4893 CNTR_NORMAL,
4894 access_sdma_header_select_err_cnt),
4895[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4896 CNTR_NORMAL,
4897 access_sdma_reserved_9_err_cnt),
4898[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_sdma_packet_desc_overflow_err_cnt),
4901[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4902 CNTR_NORMAL,
4903 access_sdma_length_mismatch_err_cnt),
4904[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_sdma_halt_err_cnt),
4907[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_mem_read_err_cnt),
4910[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_first_desc_err_cnt),
4913[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_tail_out_of_bounds_err_cnt),
4916[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_too_long_err_cnt),
4919[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4920 CNTR_NORMAL,
4921 access_sdma_gen_mismatch_err_cnt),
4922[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004925};
4926
4927static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4928[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4929 CNTR_NORMAL),
4930[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4931 CNTR_NORMAL),
4932[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4933 CNTR_NORMAL),
4934[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4935 CNTR_NORMAL),
4936[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4937 CNTR_NORMAL),
4938[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4939 CNTR_NORMAL),
4940[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4941 CNTR_NORMAL),
4942[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4943[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4944[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4945[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4946 CNTR_SYNTH | CNTR_VL),
4947[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4948 CNTR_SYNTH | CNTR_VL),
4949[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4950 CNTR_SYNTH | CNTR_VL),
4951[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4952[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4953[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4954 access_sw_link_dn_cnt),
4955[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4956 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004957[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4958 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004959[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4960 access_sw_xmit_discards),
4961[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4962 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4963 access_sw_xmit_discards),
4964[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4965 access_xmit_constraint_errs),
4966[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4967 access_rcv_constraint_errs),
4968[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4969[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4970[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4971[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4972[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4973[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4974[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4975[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4976[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupReq, rc_dupreq),
4977[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4978[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4979[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4980[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4981 access_sw_cpu_rc_acks),
4982[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4983 access_sw_cpu_rc_qacks),
4984[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4985 access_sw_cpu_rc_delayed_comp),
4986[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4987[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4988[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4989[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4990[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4991[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4992[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4993[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4994[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4995[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4996[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4997[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4998[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
4999[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5000[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5001[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5002[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5003[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5004[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5005[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5006[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5007[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5008[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5009[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5010[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5011[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5012[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5013[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5014[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5015[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5016[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5017[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5018[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5019[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5020[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5021[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5022[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5023[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5024[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5025[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5026[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5027[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5028[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5029[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5030[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5031[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5032[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5033[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5034[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5035[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5036[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5037[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5038[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5039[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5040[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5041[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5042[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5043[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5044[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5045[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5046[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5047[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5048[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5049[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5050[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5051[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5052[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5053[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5054[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5055[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5056[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5057[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5058[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5059[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5060[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5061[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5062[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5063[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5064[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5065[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5066};
5067
5068/* ======================================================================== */
5069
Mike Marciniszyn77241052015-07-30 15:17:43 -04005070/* return true if this is chip revision A */
5071int is_ax(struct hfi1_devdata *dd)
5072{
5073 u8 chip_rev_minor =
5074 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5075 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5076 return (chip_rev_minor & 0xf0) == 0;
5077}
5078
5079/* return true if this is chip revision B */
5080int is_bx(struct hfi1_devdata *dd)
5081{
5082 u8 chip_rev_minor =
5083 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5084 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005085 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005086}
5087
5088/*
5089 * Append string s to buffer buf. Arguments curp and len are the current
5090 * position and remaining length, respectively.
5091 *
5092 * return 0 on success, 1 on out of room
5093 */
5094static int append_str(char *buf, char **curp, int *lenp, const char *s)
5095{
5096 char *p = *curp;
5097 int len = *lenp;
5098 int result = 0; /* success */
5099 char c;
5100
5101	/* add a separating comma if this is not the first string in the buffer */
5102 if (p != buf) {
5103 if (len == 0) {
5104 result = 1; /* out of room */
5105 goto done;
5106 }
5107 *p++ = ',';
5108 len--;
5109 }
5110
5111 /* copy the string */
5112 while ((c = *s++) != 0) {
5113 if (len == 0) {
5114 result = 1; /* out of room */
5115 goto done;
5116 }
5117 *p++ = c;
5118 len--;
5119 }
5120
5121done:
5122 /* write return values */
5123 *curp = p;
5124 *lenp = len;
5125
5126 return result;
5127}
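/*
 * Illustrative sketch only, not part of the driver build: how a caller
 * such as flag_string() below typically drives append_str().  The buffer
 * size and the strings are made up for this example.
 *
 *	char buf[64];
 *	char *p = buf;
 *	int len = sizeof(buf) - 1;	(reserve space for the final nul)
 *	int no_room;
 *
 *	no_room = append_str(buf, &p, &len, "CceErrInt");
 *	if (!no_room)
 *		no_room = append_str(buf, &p, &len, "RxeErrInt");
 *	*p = 0;
 *
 * After these calls buf holds "CceErrInt,RxeErrInt"; a failed append
 * would instead have returned 1 and left the partial string in place.
 */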
5128
5129/*
5130 * Using the given flag table, print a comma separated string into
5131 * the buffer. End in '*' if the buffer is too short.
5132 */
5133static char *flag_string(char *buf, int buf_len, u64 flags,
5134 struct flag_table *table, int table_size)
5135{
5136 char extra[32];
5137 char *p = buf;
5138 int len = buf_len;
5139 int no_room = 0;
5140 int i;
5141
5142	/* make sure there are at least 2 bytes so we can always form "*" */
5143 if (len < 2)
5144 return "";
5145
5146 len--; /* leave room for a nul */
5147 for (i = 0; i < table_size; i++) {
5148 if (flags & table[i].flag) {
5149 no_room = append_str(buf, &p, &len, table[i].str);
5150 if (no_room)
5151 break;
5152 flags &= ~table[i].flag;
5153 }
5154 }
5155
5156 /* any undocumented bits left? */
5157 if (!no_room && flags) {
5158 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5159 no_room = append_str(buf, &p, &len, extra);
5160 }
5161
5162	/* add '*' if we ran out of room */
5163 if (no_room) {
5164 /* may need to back up to add space for a '*' */
5165 if (len == 0)
5166 --p;
5167 *p++ = '*';
5168 }
5169
5170 /* add final nul - space already allocated above */
5171 *p = 0;
5172 return buf;
5173}
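/*
 * Illustrative sketch only (assumed values, not driver code): given a
 * hypothetical two-entry table, flag_string() emits the documented flag
 * names first, then any leftover bits, and appends '*' on truncation.
 *
 *	static struct flag_table example_flags[] = {
 *		{ .flag = 0x1ull, .str = "FlagA" },
 *		{ .flag = 0x2ull, .str = "FlagB" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x7ull, example_flags,
 *		    ARRAY_SIZE(example_flags));
 *
 * buf then reads "FlagA,FlagB,bits 0x4" since bit 2 is not in the table.
 */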
5174
5175/* first 8 CCE error interrupt source names */
5176static const char * const cce_misc_names[] = {
5177 "CceErrInt", /* 0 */
5178 "RxeErrInt", /* 1 */
5179 "MiscErrInt", /* 2 */
5180 "Reserved3", /* 3 */
5181 "PioErrInt", /* 4 */
5182 "SDmaErrInt", /* 5 */
5183 "EgressErrInt", /* 6 */
5184 "TxeErrInt" /* 7 */
5185};
5186
5187/*
5188 * Return the miscellaneous error interrupt name.
5189 */
5190static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5191{
5192 if (source < ARRAY_SIZE(cce_misc_names))
5193 strncpy(buf, cce_misc_names[source], bsize);
5194 else
5195 snprintf(buf,
5196 bsize,
5197 "Reserved%u",
5198 source + IS_GENERAL_ERR_START);
5199
5200 return buf;
5201}
5202
5203/*
5204 * Return the SDMA engine error interrupt name.
5205 */
5206static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5207{
5208 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5209 return buf;
5210}
5211
5212/*
5213 * Return the send context error interrupt name.
5214 */
5215static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5216{
5217 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5218 return buf;
5219}
5220
5221static const char * const various_names[] = {
5222 "PbcInt",
5223 "GpioAssertInt",
5224 "Qsfp1Int",
5225 "Qsfp2Int",
5226 "TCritInt"
5227};
5228
5229/*
5230 * Return the various interrupt name.
5231 */
5232static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5233{
5234 if (source < ARRAY_SIZE(various_names))
5235 strncpy(buf, various_names[source], bsize);
5236 else
5237 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5238 return buf;
5239}
5240
5241/*
5242 * Return the DC interrupt name.
5243 */
5244static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5245{
5246 static const char * const dc_int_names[] = {
5247 "common",
5248 "lcb",
5249 "8051",
5250 "lbm" /* local block merge */
5251 };
5252
5253 if (source < ARRAY_SIZE(dc_int_names))
5254 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5255 else
5256 snprintf(buf, bsize, "DCInt%u", source);
5257 return buf;
5258}
5259
5260static const char * const sdma_int_names[] = {
5261 "SDmaInt",
5262 "SdmaIdleInt",
5263 "SdmaProgressInt",
5264};
5265
5266/*
5267 * Return the SDMA engine interrupt name.
5268 */
5269static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5270{
5271 /* what interrupt */
5272 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5273 /* which engine */
5274 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5275
5276 if (likely(what < 3))
5277 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5278 else
5279 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5280 return buf;
5281}
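/*
 * Example decode (illustrative only): assuming TXE_NUM_SDMA_ENGINES is 16,
 * a source value of 17 splits into what = 17 / 16 = 1 and which = 17 % 16
 * = 1, so the name produced is "SdmaIdleInt1".
 */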
5282
5283/*
5284 * Return the receive available interrupt name.
5285 */
5286static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5287{
5288 snprintf(buf, bsize, "RcvAvailInt%u", source);
5289 return buf;
5290}
5291
5292/*
5293 * Return the receive urgent interrupt name.
5294 */
5295static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5296{
5297 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5298 return buf;
5299}
5300
5301/*
5302 * Return the send credit interrupt name.
5303 */
5304static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5305{
5306 snprintf(buf, bsize, "SendCreditInt%u", source);
5307 return buf;
5308}
5309
5310/*
5311 * Return the reserved interrupt name.
5312 */
5313static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5314{
5315 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5316 return buf;
5317}
5318
5319static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5320{
5321 return flag_string(buf, buf_len, flags,
5322 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5323}
5324
5325static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5326{
5327 return flag_string(buf, buf_len, flags,
5328 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5329}
5330
5331static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5332{
5333 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5334 ARRAY_SIZE(misc_err_status_flags));
5335}
5336
5337static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5338{
5339 return flag_string(buf, buf_len, flags,
5340 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5341}
5342
5343static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5344{
5345 return flag_string(buf, buf_len, flags,
5346 sdma_err_status_flags,
5347 ARRAY_SIZE(sdma_err_status_flags));
5348}
5349
5350static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5351{
5352 return flag_string(buf, buf_len, flags,
5353 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5354}
5355
5356static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5357{
5358 return flag_string(buf, buf_len, flags,
5359 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5360}
5361
5362static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5363{
5364 return flag_string(buf, buf_len, flags,
5365 send_err_status_flags,
5366 ARRAY_SIZE(send_err_status_flags));
5367}
5368
5369static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5370{
5371 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005372 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005373
5374 /*
5375	 * For most of these errors, there is nothing that can be done except
5376	 * to report or record them.
5377 */
5378 dd_dev_info(dd, "CCE Error: %s\n",
5379 cce_err_status_string(buf, sizeof(buf), reg));
5380
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005381 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5382 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005383 /* this error requires a manual drop into SPC freeze mode */
5384		/* and then a fix up */
5385 start_freeze_handling(dd->pport, FREEZE_SELF);
5386 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005387
5388 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5389 if (reg & (1ull << i)) {
5390 incr_cntr64(&dd->cce_err_status_cnt[i]);
5391 /* maintain a counter over all cce_err_status errors */
5392 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5393 }
5394 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005395}
5396
5397/*
5398 * Check counters for receive errors that do not have an interrupt
5399 * associated with them.
5400 */
5401#define RCVERR_CHECK_TIME 10
5402static void update_rcverr_timer(unsigned long opaque)
5403{
5404 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5405 struct hfi1_pportdata *ppd = dd->pport;
5406 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5407
5408 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5409 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5410 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5411 set_link_down_reason(ppd,
5412 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5413 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5414 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5415 }
5416 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5417
5418 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5419}
5420
5421static int init_rcverr(struct hfi1_devdata *dd)
5422{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305423 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005424 /* Assume the hardware counter has been reset */
5425 dd->rcv_ovfl_cnt = 0;
5426 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5427}
5428
5429static void free_rcverr(struct hfi1_devdata *dd)
5430{
5431 if (dd->rcverr_timer.data)
5432 del_timer_sync(&dd->rcverr_timer);
5433 dd->rcverr_timer.data = 0;
5434}
5435
5436static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5437{
5438 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005439 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005440
5441 dd_dev_info(dd, "Receive Error: %s\n",
5442 rxe_err_status_string(buf, sizeof(buf), reg));
5443
5444 if (reg & ALL_RXE_FREEZE_ERR) {
5445 int flags = 0;
5446
5447 /*
5448 * Freeze mode recovery is disabled for the errors
5449 * in RXE_FREEZE_ABORT_MASK
5450 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005451 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005452 flags = FREEZE_ABORT;
5453
5454 start_freeze_handling(dd->pport, flags);
5455 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005456
5457 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5458 if (reg & (1ull << i))
5459 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5460 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005461}
5462
5463static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5464{
5465 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005466 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467
5468 dd_dev_info(dd, "Misc Error: %s",
5469 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005470 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5471 if (reg & (1ull << i))
5472 incr_cntr64(&dd->misc_err_status_cnt[i]);
5473 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005474}
5475
5476static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5477{
5478 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005479 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005480
5481 dd_dev_info(dd, "PIO Error: %s\n",
5482 pio_err_status_string(buf, sizeof(buf), reg));
5483
5484 if (reg & ALL_PIO_FREEZE_ERR)
5485 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005486
5487 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5488 if (reg & (1ull << i))
5489 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5490 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005491}
5492
5493static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5494{
5495 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005496 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005497
5498 dd_dev_info(dd, "SDMA Error: %s\n",
5499 sdma_err_status_string(buf, sizeof(buf), reg));
5500
5501 if (reg & ALL_SDMA_FREEZE_ERR)
5502 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005503
5504 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5505 if (reg & (1ull << i))
5506 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5507 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005508}
5509
5510static void count_port_inactive(struct hfi1_devdata *dd)
5511{
5512 struct hfi1_pportdata *ppd = dd->pport;
5513
5514 if (ppd->port_xmit_discards < ~(u64)0)
5515 ppd->port_xmit_discards++;
5516}
5517
5518/*
5519 * We have had a "disallowed packet" error during egress. Determine the
5520 * integrity check that failed, and update the relevant error counter, etc.
5521 *
5522 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5523 * bit of state per integrity check, so we can miss the reason for an
5524 * egress error when more than one packet fails the same integrity check:
5525 * the corresponding bit in SEND_EGRESS_ERR_INFO was already cleared.
5526 */
5527static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5528{
5529 struct hfi1_pportdata *ppd = dd->pport;
5530 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5531 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5532 char buf[96];
5533
5534 /* clear down all observed info as quickly as possible after read */
5535 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5536
5537 dd_dev_info(dd,
5538 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5539 info, egress_err_info_string(buf, sizeof(buf), info), src);
5540
5541 /* Eventually add other counters for each bit */
5542
5543 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5544 if (ppd->port_xmit_discards < ~(u64)0)
5545 ppd->port_xmit_discards++;
5546 }
5547}
5548
5549/*
5550 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5551 * register. Does it represent a 'port inactive' error?
5552 */
5553static inline int port_inactive_err(u64 posn)
5554{
5555 return (posn >= SEES(TX_LINKDOWN) &&
5556 posn <= SEES(TX_INCORRECT_LINK_STATE));
5557}
5558
5559/*
5560 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5561 * register. Does it represent a 'disallowed packet' error?
5562 */
5563static inline int disallowed_pkt_err(u64 posn)
5564{
5565 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5566 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5567}
5568
5569static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5570{
5571 u64 reg_copy = reg, handled = 0;
5572 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005573 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005574
5575 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5576 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005577 if (is_ax(dd) && (reg &
Mike Marciniszyn77241052015-07-30 15:17:43 -04005578 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5579 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5580 start_freeze_handling(dd->pport, 0);
5581
5582 while (reg_copy) {
5583 int posn = fls64(reg_copy);
5584 /*
5585 * fls64() returns a 1-based offset, but we generally
5586 * want 0-based offsets.
5587 */
5588 int shift = posn - 1;
5589
5590 if (port_inactive_err(shift)) {
5591 count_port_inactive(dd);
5592 handled |= (1ULL << shift);
5593 } else if (disallowed_pkt_err(shift)) {
5594 handle_send_egress_err_info(dd);
5595 handled |= (1ULL << shift);
5596 }
5597 clear_bit(shift, (unsigned long *)&reg_copy);
5598 }
5599
5600 reg &= ~handled;
5601
5602 if (reg)
5603 dd_dev_info(dd, "Egress Error: %s\n",
5604 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005605
5606 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5607 if (reg & (1ull << i))
5608 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5609 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005610}
5611
5612static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5613{
5614 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005615 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005616
5617 dd_dev_info(dd, "Send Error: %s\n",
5618 send_err_status_string(buf, sizeof(buf), reg));
5619
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005620 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5621 if (reg & (1ull << i))
5622 incr_cntr64(&dd->send_err_status_cnt[i]);
5623 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005624}
5625
5626/*
5627 * The maximum number of times the error clear down will loop before
5628 * blocking a repeating error. This value is arbitrary.
5629 */
5630#define MAX_CLEAR_COUNT 20
5631
5632/*
5633 * Clear and handle an error register. All error interrupts are funneled
5634 * through here to have a central location to correctly handle single-
5635 * or multi-shot errors.
5636 *
5637 * For non per-context registers, call this routine with a context value
5638 * of 0 so the per-context offset is zero.
5639 *
5640 * If the handler loops too many times, assume that something is wrong
5641 * and can't be fixed, so mask the error bits.
5642 */
5643static void interrupt_clear_down(struct hfi1_devdata *dd,
5644 u32 context,
5645 const struct err_reg_info *eri)
5646{
5647 u64 reg;
5648 u32 count;
5649
5650 /* read in a loop until no more errors are seen */
5651 count = 0;
5652 while (1) {
5653 reg = read_kctxt_csr(dd, context, eri->status);
5654 if (reg == 0)
5655 break;
5656 write_kctxt_csr(dd, context, eri->clear, reg);
5657 if (likely(eri->handler))
5658 eri->handler(dd, context, reg);
5659 count++;
5660 if (count > MAX_CLEAR_COUNT) {
5661 u64 mask;
5662
5663 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5664 eri->desc, reg);
5665 /*
5666 * Read-modify-write so any other masked bits
5667 * remain masked.
5668 */
5669 mask = read_kctxt_csr(dd, context, eri->mask);
5670 mask &= ~reg;
5671 write_kctxt_csr(dd, context, eri->mask, mask);
5672 break;
5673 }
5674 }
5675}
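/*
 * Sketch of an err_reg_info entry as consumed by interrupt_clear_down()
 * (illustrative only; the real tables are defined elsewhere in this file):
 *
 *	static const struct err_reg_info example_eri = {
 *		.status  = CCE_ERR_STATUS,
 *		.clear   = CCE_ERR_CLEAR,
 *		.mask    = CCE_ERR_MASK,
 *		.handler = handle_cce_err,
 *		.desc    = "CceErr",
 *	};
 *
 * interrupt_clear_down(dd, 0, &example_eri) would then read the status CSR
 * in a loop, write the observed bits back to the clear CSR, call
 * handle_cce_err(), and mask any repeating bits via the mask CSR.
 */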
5676
5677/*
5678 * CCE block "misc" interrupt. Source is < 16.
5679 */
5680static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5681{
5682 const struct err_reg_info *eri = &misc_errs[source];
5683
5684 if (eri->handler) {
5685 interrupt_clear_down(dd, 0, eri);
5686 } else {
5687 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5688 source);
5689 }
5690}
5691
5692static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5693{
5694 return flag_string(buf, buf_len, flags,
5695 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5696}
5697
5698/*
5699 * Send context error interrupt. Source (hw_context) is < 160.
5700 *
5701 * All send context errors cause the send context to halt. The normal
5702 * clear-down mechanism cannot be used because we cannot clear the
5703 * error bits until several other long-running items are done first.
5704 * This is OK because with the context halted, nothing else is going
5705 * to happen on it anyway.
5706 */
5707static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5708 unsigned int hw_context)
5709{
5710 struct send_context_info *sci;
5711 struct send_context *sc;
5712 char flags[96];
5713 u64 status;
5714 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005715 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005716
5717 sw_index = dd->hw_to_sw[hw_context];
5718 if (sw_index >= dd->num_send_contexts) {
5719 dd_dev_err(dd,
5720 "out of range sw index %u for send context %u\n",
5721 sw_index, hw_context);
5722 return;
5723 }
5724 sci = &dd->send_contexts[sw_index];
5725 sc = sci->sc;
5726 if (!sc) {
5727 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5728 sw_index, hw_context);
5729 return;
5730 }
5731
5732 /* tell the software that a halt has begun */
5733 sc_stop(sc, SCF_HALTED);
5734
5735 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5736
5737 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5738 send_context_err_status_string(flags, sizeof(flags), status));
5739
5740 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5741 handle_send_egress_err_info(dd);
5742
5743 /*
5744 * Automatically restart halted kernel contexts out of interrupt
5745 * context. User contexts must ask the driver to restart the context.
5746 */
5747 if (sc->type != SC_USER)
5748 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005749
5750 /*
5751 * Update the counters for the corresponding status bits.
5752 * Note that these particular counters are aggregated over all
5753 * 160 contexts.
5754 */
5755 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5756 if (status & (1ull << i))
5757 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5758 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005759}
5760
5761static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5762 unsigned int source, u64 status)
5763{
5764 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005765 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005766
5767 sde = &dd->per_sdma[source];
5768#ifdef CONFIG_SDMA_VERBOSITY
5769 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5770 slashstrip(__FILE__), __LINE__, __func__);
5771 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5772 sde->this_idx, source, (unsigned long long)status);
5773#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005774 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005775 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005776
5777 /*
5778 * Update the counters for the corresponding status bits.
5779 * Note that these particular counters are aggregated over
5780 * all 16 DMA engines.
5781 */
5782 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5783 if (status & (1ull << i))
5784 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5785 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005786}
5787
5788/*
5789 * CCE block SDMA error interrupt. Source is < 16.
5790 */
5791static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5792{
5793#ifdef CONFIG_SDMA_VERBOSITY
5794 struct sdma_engine *sde = &dd->per_sdma[source];
5795
5796 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5797 slashstrip(__FILE__), __LINE__, __func__);
5798 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5799 source);
5800 sdma_dumpstate(sde);
5801#endif
5802 interrupt_clear_down(dd, source, &sdma_eng_err);
5803}
5804
5805/*
5806 * CCE block "various" interrupt. Source is < 8.
5807 */
5808static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5809{
5810 const struct err_reg_info *eri = &various_err[source];
5811
5812 /*
5813 * TCritInt cannot go through interrupt_clear_down()
5814 * because it is not a second tier interrupt. The handler
5815 * should be called directly.
5816 */
5817 if (source == TCRIT_INT_SOURCE)
5818 handle_temp_err(dd);
5819 else if (eri->handler)
5820 interrupt_clear_down(dd, 0, eri);
5821 else
5822 dd_dev_info(dd,
5823 "%s: Unimplemented/reserved interrupt %d\n",
5824 __func__, source);
5825}
5826
5827static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5828{
5829 /* source is always zero */
5830 struct hfi1_pportdata *ppd = dd->pport;
5831 unsigned long flags;
5832 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5833
5834 if (reg & QSFP_HFI0_MODPRST_N) {
5835
5836 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5837 __func__);
5838
5839 if (!qsfp_mod_present(ppd)) {
5840 ppd->driver_link_ready = 0;
5841 /*
5842 * Cable removed, reset all our information about the
5843 * cache and cable capabilities
5844 */
5845
5846 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5847 /*
5848 * We don't set cache_refresh_required here as we expect
5849 * an interrupt when a cable is inserted
5850 */
5851 ppd->qsfp_info.cache_valid = 0;
5852 ppd->qsfp_info.qsfp_interrupt_functional = 0;
5853 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5854 flags);
5855 write_csr(dd,
5856 dd->hfi1_id ?
5857 ASIC_QSFP2_INVERT :
5858 ASIC_QSFP1_INVERT,
5859 qsfp_int_mgmt);
5860 if (ppd->host_link_state == HLS_DN_POLL) {
5861 /*
5862 * The link is still in POLL. This means
5863 * that the normal link down processing
5864 * will not happen. We have to do it here
5865 * before turning the DC off.
5866 */
5867 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5868 }
5869 } else {
5870 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5871 ppd->qsfp_info.cache_valid = 0;
5872 ppd->qsfp_info.cache_refresh_required = 1;
5873 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5874 flags);
5875
5876 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5877 write_csr(dd,
5878 dd->hfi1_id ?
5879 ASIC_QSFP2_INVERT :
5880 ASIC_QSFP1_INVERT,
5881 qsfp_int_mgmt);
5882 }
5883 }
5884
5885 if (reg & QSFP_HFI0_INT_N) {
5886
5887 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5888 __func__);
5889 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5890 ppd->qsfp_info.check_interrupt_flags = 1;
5891 ppd->qsfp_info.qsfp_interrupt_functional = 1;
5892 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5893 }
5894
5895 /* Schedule the QSFP work only if there is a cable attached. */
5896 if (qsfp_mod_present(ppd))
5897 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5898}
5899
5900static int request_host_lcb_access(struct hfi1_devdata *dd)
5901{
5902 int ret;
5903
5904 ret = do_8051_command(dd, HCMD_MISC,
5905 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5906 NULL);
5907 if (ret != HCMD_SUCCESS) {
5908 dd_dev_err(dd, "%s: command failed with error %d\n",
5909 __func__, ret);
5910 }
5911 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5912}
5913
5914static int request_8051_lcb_access(struct hfi1_devdata *dd)
5915{
5916 int ret;
5917
5918 ret = do_8051_command(dd, HCMD_MISC,
5919 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5920 NULL);
5921 if (ret != HCMD_SUCCESS) {
5922 dd_dev_err(dd, "%s: command failed with error %d\n",
5923 __func__, ret);
5924 }
5925 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5926}
5927
5928/*
5929 * Set the LCB selector - allow host access. The DCC selector always
5930 * points to the host.
5931 */
5932static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5933{
5934 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5935 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5936 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5937}
5938
5939/*
5940 * Clear the LCB selector - allow 8051 access. The DCC selector always
5941 * points to the host.
5942 */
5943static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5944{
5945 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5946 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5947}
5948
5949/*
5950 * Acquire LCB access from the 8051. If the host already has access,
5951 * just increment a counter. Otherwise, inform the 8051 that the
5952 * host is taking access.
5953 *
5954 * Returns:
5955 * 0 on success
5956 * -EBUSY if the 8051 has control and cannot be disturbed
5957 * -errno if unable to acquire access from the 8051
5958 */
5959int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5960{
5961 struct hfi1_pportdata *ppd = dd->pport;
5962 int ret = 0;
5963
5964 /*
5965 * Use the host link state lock so the operation of this routine
5966 * { link state check, selector change, count increment } can occur
5967 * as a unit against a link state change. Otherwise there is a
5968 * race between the state change and the count increment.
5969 */
5970 if (sleep_ok) {
5971 mutex_lock(&ppd->hls_lock);
5972 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03005973 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005974 udelay(1);
5975 }
5976
5977 /* this access is valid only when the link is up */
5978 if ((ppd->host_link_state & HLS_UP) == 0) {
5979 dd_dev_info(dd, "%s: link state %s not up\n",
5980 __func__, link_state_name(ppd->host_link_state));
5981 ret = -EBUSY;
5982 goto done;
5983 }
5984
5985 if (dd->lcb_access_count == 0) {
5986 ret = request_host_lcb_access(dd);
5987 if (ret) {
5988 dd_dev_err(dd,
5989 "%s: unable to acquire LCB access, err %d\n",
5990 __func__, ret);
5991 goto done;
5992 }
5993 set_host_lcb_access(dd);
5994 }
5995 dd->lcb_access_count++;
5996done:
5997 mutex_unlock(&ppd->hls_lock);
5998 return ret;
5999}
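/*
 * Usage sketch (illustrative only): host accesses to LCB CSRs are
 * bracketed by an acquire/release pair, e.g.
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read or write LCB CSRs here ...
 *		release_lcb_access(dd, 1);
 *	}
 */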
6000
6001/*
6002 * Release LCB access by decrementing the use count. If the count is moving
6003 * from 1 to 0, inform 8051 that it has control back.
6004 *
6005 * Returns:
6006 * 0 on success
6007 * -errno if unable to release access to the 8051
6008 */
6009int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6010{
6011 int ret = 0;
6012
6013 /*
6014 * Use the host link state lock because the acquire needed it.
6015 * Here, we only need to keep { selector change, count decrement }
6016 * as a unit.
6017 */
6018 if (sleep_ok) {
6019 mutex_lock(&dd->pport->hls_lock);
6020 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006021 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006022 udelay(1);
6023 }
6024
6025 if (dd->lcb_access_count == 0) {
6026 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6027 __func__);
6028 goto done;
6029 }
6030
6031 if (dd->lcb_access_count == 1) {
6032 set_8051_lcb_access(dd);
6033 ret = request_8051_lcb_access(dd);
6034 if (ret) {
6035 dd_dev_err(dd,
6036 "%s: unable to release LCB access, err %d\n",
6037 __func__, ret);
6038 /* restore host access if the grant didn't work */
6039 set_host_lcb_access(dd);
6040 goto done;
6041 }
6042 }
6043 dd->lcb_access_count--;
6044done:
6045 mutex_unlock(&dd->pport->hls_lock);
6046 return ret;
6047}
6048
6049/*
6050 * Initialize LCB access variables and state. Called during driver load,
6051 * after most of the initialization is finished.
6052 *
6053 * The DC default is LCB access on for the host. The driver defaults to
6054 * leaving access to the 8051. Assign access now - this constrains the call
6055 * to this routine to be after all LCB set-up is done. In particular, after
6056 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6057 */
6058static void init_lcb_access(struct hfi1_devdata *dd)
6059{
6060 dd->lcb_access_count = 0;
6061}
6062
6063/*
6064 * Write a response back to an 8051 request.
6065 */
6066static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6067{
6068 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6069 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6070 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6071 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6072}
6073
6074/*
6075 * Handle requests from the 8051.
6076 */
6077static void handle_8051_request(struct hfi1_devdata *dd)
6078{
6079 u64 reg;
6080 u16 data;
6081 u8 type;
6082
6083 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6084 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6085 return; /* no request */
6086
6087 /* zero out COMPLETED so the response is seen */
6088 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6089
6090 /* extract request details */
6091 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6092 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6093 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6094 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6095
6096 switch (type) {
6097 case HREQ_LOAD_CONFIG:
6098 case HREQ_SAVE_CONFIG:
6099 case HREQ_READ_CONFIG:
6100 case HREQ_SET_TX_EQ_ABS:
6101 case HREQ_SET_TX_EQ_REL:
6102 case HREQ_ENABLE:
6103 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6104 type);
6105 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6106 break;
6107
6108 case HREQ_CONFIG_DONE:
6109 hreq_response(dd, HREQ_SUCCESS, 0);
6110 break;
6111
6112 case HREQ_INTERFACE_TEST:
6113 hreq_response(dd, HREQ_SUCCESS, data);
6114 break;
6115
6116 default:
6117 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6118 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6119 break;
6120 }
6121}
6122
6123static void write_global_credit(struct hfi1_devdata *dd,
6124 u8 vau, u16 total, u16 shared)
6125{
6126 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6127 ((u64)total
6128 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6129 | ((u64)shared
6130 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6131 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6132}
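/*
 * Example (illustrative values): write_global_credit(dd, 2, 2048, 0) packs
 * a vAU encoding of 2 (32 byte allocation units, see vau_to_au() below),
 * a total credit limit of 2048 and a shared limit of 0 into the single
 * SEND_CM_GLOBAL_CREDIT register write.
 */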
6133
6134/*
6135 * Set up initial VL15 credits of the remote. Assumes the rest of
6136 * the CM credit registers are zero from a previous global or credit reset.
6137 */
6138void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6139{
6140 /* leave shared count at zero for both global and VL15 */
6141 write_global_credit(dd, vau, vl15buf, 0);
6142
6143 /* We may need some credits for another VL when sending packets
6144 * with the snoop interface. Dividing it down the middle for VL15
6145 * and VL0 should suffice.
6146 */
6147 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6148 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6149 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6150 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6151 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6152 } else {
6153 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6154 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6155 }
6156}
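/*
 * Example (illustrative values): with vl15buf = 8, the normal path
 * dedicates all 8 credits to VL15; if the snoop interface is active,
 * VL15 and VL0 each receive 4 dedicated credits instead.
 */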
6157
6158/*
6159 * Zero all credit details from the previous connection and
6160 * reset the CM manager's internal counters.
6161 */
6162void reset_link_credits(struct hfi1_devdata *dd)
6163{
6164 int i;
6165
6166 /* remove all previous VL credit limits */
6167 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6168 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6169 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6170 write_global_credit(dd, 0, 0, 0);
6171 /* reset the CM block */
6172 pio_send_control(dd, PSC_CM_RESET);
6173}
6174
6175/* convert a vCU to a CU */
6176static u32 vcu_to_cu(u8 vcu)
6177{
6178 return 1 << vcu;
6179}
6180
6181/* convert a CU to a vCU */
6182static u8 cu_to_vcu(u32 cu)
6183{
6184 return ilog2(cu);
6185}
6186
6187/* convert a vAU to an AU */
6188static u32 vau_to_au(u8 vau)
6189{
6190 return 8 * (1 << vau);
6191}
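/*
 * Worked examples for the conversions above (illustrative only):
 *	vcu_to_cu(3) = 1 << 3        = 8 credit units
 *	cu_to_vcu(8) = ilog2(8)      = 3
 *	vau_to_au(2) = 8 * (1 << 2)  = 32 byte allocation units
 */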
6192
6193static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6194{
6195 ppd->sm_trap_qp = 0x0;
6196 ppd->sa_qp = 0x1;
6197}
6198
6199/*
6200 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6201 */
6202static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6203{
6204 u64 reg;
6205
6206 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6207 write_csr(dd, DC_LCB_CFG_RUN, 0);
6208 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6209 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6210 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6211 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6212 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6213 reg = read_csr(dd, DCC_CFG_RESET);
6214 write_csr(dd, DCC_CFG_RESET,
6215 reg
6216 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6217 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6218 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6219 if (!abort) {
6220 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6221 write_csr(dd, DCC_CFG_RESET, reg);
6222 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6223 }
6224}
6225
6226/*
6227 * This routine should be called after the link has been transitioned to
6228 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6229 * reset).
6230 *
6231 * The expectation is that the caller of this routine would have taken
6232 * care of properly transitioning the link into the correct state.
6233 */
6234static void dc_shutdown(struct hfi1_devdata *dd)
6235{
6236 unsigned long flags;
6237
6238 spin_lock_irqsave(&dd->dc8051_lock, flags);
6239 if (dd->dc_shutdown) {
6240 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6241 return;
6242 }
6243 dd->dc_shutdown = 1;
6244 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6245 /* Shutdown the LCB */
6246 lcb_shutdown(dd, 1);
6247	/* Going to OFFLINE would have caused the 8051 to put the
6248	 * SerDes into reset already. Just need to shut down the 8051
6249	 * itself. */
6250 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6251}
6252
6253/* Calling this after the DC has been brought out of reset should not
6254 * do any damage. */
6255static void dc_start(struct hfi1_devdata *dd)
6256{
6257 unsigned long flags;
6258 int ret;
6259
6260 spin_lock_irqsave(&dd->dc8051_lock, flags);
6261 if (!dd->dc_shutdown)
6262 goto done;
6263 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6264 /* Take the 8051 out of reset */
6265 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6266 /* Wait until 8051 is ready */
6267 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6268 if (ret) {
6269 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6270 __func__);
6271 }
6272 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6273 write_csr(dd, DCC_CFG_RESET, 0x10);
6274 /* lcb_shutdown() with abort=1 does not restore these */
6275 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6276 spin_lock_irqsave(&dd->dc8051_lock, flags);
6277 dd->dc_shutdown = 0;
6278done:
6279 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6280}
6281
6282/*
6283 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6284 */
6285static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6286{
6287 u64 rx_radr, tx_radr;
6288 u32 version;
6289
6290 if (dd->icode != ICODE_FPGA_EMULATION)
6291 return;
6292
6293 /*
6294 * These LCB defaults on emulator _s are good, nothing to do here:
6295 * LCB_CFG_TX_FIFOS_RADR
6296 * LCB_CFG_RX_FIFOS_RADR
6297 * LCB_CFG_LN_DCLK
6298 * LCB_CFG_IGNORE_LOST_RCLK
6299 */
6300 if (is_emulator_s(dd))
6301 return;
6302 /* else this is _p */
6303
6304 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006305 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006306 version = 0x2d; /* all B0 use 0x2d or higher settings */
6307
6308 if (version <= 0x12) {
6309 /* release 0x12 and below */
6310
6311 /*
6312 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6313 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6314 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6315 */
6316 rx_radr =
6317 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6318 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6319 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6320 /*
6321 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6322 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6323 */
6324 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6325 } else if (version <= 0x18) {
6326 /* release 0x13 up to 0x18 */
6327 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6328 rx_radr =
6329 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6330 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6331 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6332 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6333 } else if (version == 0x19) {
6334 /* release 0x19 */
6335 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6336 rx_radr =
6337 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6338 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6339 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6340 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6341 } else if (version == 0x1a) {
6342 /* release 0x1a */
6343 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6344 rx_radr =
6345 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6346 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6347 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6348 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6349 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6350 } else {
6351 /* release 0x1b and higher */
6352 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6353 rx_radr =
6354 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6355 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6356 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6357 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6358 }
6359
6360 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6361 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6362 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6363 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6364 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6365}
6366
6367/*
6368 * Handle a SMA idle message
6369 *
6370 * This is a work-queue function outside of the interrupt.
6371 */
6372void handle_sma_message(struct work_struct *work)
6373{
6374 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6375 sma_message_work);
6376 struct hfi1_devdata *dd = ppd->dd;
6377 u64 msg;
6378 int ret;
6379
 6380	/* msg is bytes 1-4 of the 40-bit idle message - the command
 6381	 * code is stripped off */
6382 ret = read_idle_sma(dd, &msg);
6383 if (ret)
6384 return;
6385 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6386 /*
6387 * React to the SMA message. Byte[1] (0 for us) is the command.
6388 */
6389 switch (msg & 0xff) {
6390 case SMA_IDLE_ARM:
6391 /*
6392 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6393 * State Transitions
6394 *
6395 * Only expected in INIT or ARMED, discard otherwise.
6396 */
6397 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6398 ppd->neighbor_normal = 1;
6399 break;
6400 case SMA_IDLE_ACTIVE:
6401 /*
6402 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6403 * State Transitions
6404 *
6405 * Can activate the node. Discard otherwise.
6406 */
6407 if (ppd->host_link_state == HLS_UP_ARMED
6408 && ppd->is_active_optimize_enabled) {
6409 ppd->neighbor_normal = 1;
6410 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6411 if (ret)
6412 dd_dev_err(
6413 dd,
6414 "%s: received Active SMA idle message, couldn't set link to Active\n",
6415 __func__);
6416 }
6417 break;
6418 default:
6419 dd_dev_err(dd,
6420 "%s: received unexpected SMA idle message 0x%llx\n",
6421 __func__, msg);
6422 break;
6423 }
6424}
6425
6426static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6427{
6428 u64 rcvctrl;
6429 unsigned long flags;
6430
6431 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6432 rcvctrl = read_csr(dd, RCV_CTRL);
6433 rcvctrl |= add;
6434 rcvctrl &= ~clear;
6435 write_csr(dd, RCV_CTRL, rcvctrl);
6436 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6437}
6438
6439static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6440{
6441 adjust_rcvctrl(dd, add, 0);
6442}
6443
6444static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6445{
6446 adjust_rcvctrl(dd, 0, clear);
6447}
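/*
 * Usage sketch for the two wrappers above, as used later in this file
 * (illustrative): add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) enables
 * the port and clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) disables
 * it; each is a read-modify-write of RCV_CTRL under rcvctrl_lock.
 */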
6448
6449/*
6450 * Called from all interrupt handlers to start handling an SPC freeze.
6451 */
6452void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6453{
6454 struct hfi1_devdata *dd = ppd->dd;
6455 struct send_context *sc;
6456 int i;
6457
6458 if (flags & FREEZE_SELF)
6459 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6460
6461 /* enter frozen mode */
6462 dd->flags |= HFI1_FROZEN;
6463
6464 /* notify all SDMA engines that they are going into a freeze */
6465 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6466
6467 /* do halt pre-handling on all enabled send contexts */
6468 for (i = 0; i < dd->num_send_contexts; i++) {
6469 sc = dd->send_contexts[i].sc;
6470 if (sc && (sc->flags & SCF_ENABLED))
6471 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6472 }
6473
 6474	/* Send contexts are frozen. Notify user space */
6475 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6476
6477 if (flags & FREEZE_ABORT) {
6478 dd_dev_err(dd,
6479 "Aborted freeze recovery. Please REBOOT system\n");
6480 return;
6481 }
6482 /* queue non-interrupt handler */
6483 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6484}
6485
6486/*
6487 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6488 * depending on the "freeze" parameter.
6489 *
6490 * No need to return an error if it times out, our only option
6491 * is to proceed anyway.
6492 */
6493static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6494{
6495 unsigned long timeout;
6496 u64 reg;
6497
6498 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6499 while (1) {
6500 reg = read_csr(dd, CCE_STATUS);
6501 if (freeze) {
6502 /* waiting until all indicators are set */
6503 if ((reg & ALL_FROZE) == ALL_FROZE)
6504 return; /* all done */
6505 } else {
6506 /* waiting until all indicators are clear */
6507 if ((reg & ALL_FROZE) == 0)
6508 return; /* all done */
6509 }
6510
6511 if (time_after(jiffies, timeout)) {
6512 dd_dev_err(dd,
6513 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6514 freeze ? "" : "un",
6515 reg & ALL_FROZE,
6516 freeze ? ALL_FROZE : 0ull);
6517 return;
6518 }
6519 usleep_range(80, 120);
6520 }
6521}
6522
6523/*
6524 * Do all freeze handling for the RXE block.
6525 */
6526static void rxe_freeze(struct hfi1_devdata *dd)
6527{
6528 int i;
6529
6530 /* disable port */
6531 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6532
6533 /* disable all receive contexts */
6534 for (i = 0; i < dd->num_rcv_contexts; i++)
6535 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6536}
6537
6538/*
6539 * Unfreeze handling for the RXE block - kernel contexts only.
6540 * This will also enable the port. User contexts will do unfreeze
6541 * handling on a per-context basis as they call into the driver.
6542 *
6543 */
6544static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6545{
6546 int i;
6547
6548 /* enable all kernel contexts */
6549 for (i = 0; i < dd->n_krcv_queues; i++)
6550 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6551
6552 /* enable port */
6553 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6554}
6555
6556/*
6557 * Non-interrupt SPC freeze handling.
6558 *
6559 * This is a work-queue function outside of the triggering interrupt.
6560 */
6561void handle_freeze(struct work_struct *work)
6562{
6563 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6564 freeze_work);
6565 struct hfi1_devdata *dd = ppd->dd;
6566
6567 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006568 wait_for_freeze_status(dd, 1);
6569
6570 /* SPC is now frozen */
6571
6572 /* do send PIO freeze steps */
6573 pio_freeze(dd);
6574
6575 /* do send DMA freeze steps */
6576 sdma_freeze(dd);
6577
6578 /* do send egress freeze steps - nothing to do */
6579
6580 /* do receive freeze steps */
6581 rxe_freeze(dd);
6582
6583 /*
6584 * Unfreeze the hardware - clear the freeze, wait for each
6585 * block's frozen bit to clear, then clear the frozen flag.
6586 */
6587 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6588 wait_for_freeze_status(dd, 0);
6589
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006590 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006591 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6592 wait_for_freeze_status(dd, 1);
6593 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6594 wait_for_freeze_status(dd, 0);
6595 }
6596
6597 /* do send PIO unfreeze steps for kernel contexts */
6598 pio_kernel_unfreeze(dd);
6599
6600 /* do send DMA unfreeze steps */
6601 sdma_unfreeze(dd);
6602
6603 /* do send egress unfreeze steps - nothing to do */
6604
6605 /* do receive unfreeze steps for kernel contexts */
6606 rxe_kernel_unfreeze(dd);
6607
6608 /*
6609 * The unfreeze procedure touches global device registers when
6610 * it disables and re-enables RXE. Mark the device unfrozen
6611 * after all that is done so other parts of the driver waiting
6612 * for the device to unfreeze don't do things out of order.
6613 *
6614 * The above implies that the meaning of HFI1_FROZEN flag is
6615 * "Device has gone into freeze mode and freeze mode handling
6616 * is still in progress."
6617 *
6618 * The flag will be removed when freeze mode processing has
6619 * completed.
6620 */
6621 dd->flags &= ~HFI1_FROZEN;
6622 wake_up(&dd->event_queue);
6623
6624 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006625}
6626
6627/*
6628 * Handle a link up interrupt from the 8051.
6629 *
6630 * This is a work-queue function outside of the interrupt.
6631 */
6632void handle_link_up(struct work_struct *work)
6633{
6634 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6635 link_up_work);
6636 set_link_state(ppd, HLS_UP_INIT);
6637
6638 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6639 read_ltp_rtt(ppd->dd);
6640 /*
6641 * OPA specifies that certain counters are cleared on a transition
6642 * to link up, so do that.
6643 */
6644 clear_linkup_counters(ppd->dd);
6645 /*
6646 * And (re)set link up default values.
6647 */
6648 set_linkup_defaults(ppd);
6649
6650 /* enforce link speed enabled */
6651 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6652 /* oops - current speed is not enabled, bounce */
6653 dd_dev_err(ppd->dd,
6654 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6655 ppd->link_speed_active, ppd->link_speed_enabled);
6656 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6657 OPA_LINKDOWN_REASON_SPEED_POLICY);
6658 set_link_state(ppd, HLS_DN_OFFLINE);
6659 start_link(ppd);
6660 }
6661}
6662
6663/* Several pieces of LNI information were cached for SMA in ppd.
6664 * Reset these on link down */
6665static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6666{
6667 ppd->neighbor_guid = 0;
6668 ppd->neighbor_port_number = 0;
6669 ppd->neighbor_type = 0;
6670 ppd->neighbor_fm_security = 0;
6671}
6672
6673/*
6674 * Handle a link down interrupt from the 8051.
6675 *
6676 * This is a work-queue function outside of the interrupt.
6677 */
6678void handle_link_down(struct work_struct *work)
6679{
6680 u8 lcl_reason, neigh_reason = 0;
6681 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6682 link_down_work);
6683
6684 /* go offline first, then deal with reasons */
6685 set_link_state(ppd, HLS_DN_OFFLINE);
6686
6687 lcl_reason = 0;
6688 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6689
6690 /*
6691 * If no reason, assume peer-initiated but missed
6692 * LinkGoingDown idle flits.
6693 */
6694 if (neigh_reason == 0)
6695 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6696
6697 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6698
6699 reset_neighbor_info(ppd);
6700
6701 /* disable the port */
6702 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6703
6704 /* If there is no cable attached, turn the DC off. Otherwise,
6705 * start the link bring up. */
6706 if (!qsfp_mod_present(ppd))
6707 dc_shutdown(ppd->dd);
6708 else
6709 start_link(ppd);
6710}
6711
6712void handle_link_bounce(struct work_struct *work)
6713{
6714 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6715 link_bounce_work);
6716
6717 /*
6718 * Only do something if the link is currently up.
6719 */
6720 if (ppd->host_link_state & HLS_UP) {
6721 set_link_state(ppd, HLS_DN_OFFLINE);
6722 start_link(ppd);
6723 } else {
6724 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6725 __func__, link_state_name(ppd->host_link_state));
6726 }
6727}
6728
6729/*
6730 * Mask conversion: Capability exchange to Port LTP. The capability
6731 * exchange has an implicit 16b CRC that is mandatory.
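 * For example (illustrative), cap == (CAP_CRC_14B | CAP_CRC_48B) yields
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48.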
6732 */
6733static int cap_to_port_ltp(int cap)
6734{
6735 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6736
6737 if (cap & CAP_CRC_14B)
6738 port_ltp |= PORT_LTP_CRC_MODE_14;
6739 if (cap & CAP_CRC_48B)
6740 port_ltp |= PORT_LTP_CRC_MODE_48;
6741 if (cap & CAP_CRC_12B_16B_PER_LANE)
6742 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6743
6744 return port_ltp;
6745}
6746
6747/*
6748 * Convert an OPA Port LTP mask to capability mask
6749 */
6750int port_ltp_to_cap(int port_ltp)
6751{
6752 int cap_mask = 0;
6753
6754 if (port_ltp & PORT_LTP_CRC_MODE_14)
6755 cap_mask |= CAP_CRC_14B;
6756 if (port_ltp & PORT_LTP_CRC_MODE_48)
6757 cap_mask |= CAP_CRC_48B;
6758 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6759 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6760
6761 return cap_mask;
6762}
6763
6764/*
6765 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6766 */
6767static int lcb_to_port_ltp(int lcb_crc)
6768{
6769 int port_ltp = 0;
6770
6771 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6772 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6773 else if (lcb_crc == LCB_CRC_48B)
6774 port_ltp = PORT_LTP_CRC_MODE_48;
6775 else if (lcb_crc == LCB_CRC_14B)
6776 port_ltp = PORT_LTP_CRC_MODE_14;
6777 else
6778 port_ltp = PORT_LTP_CRC_MODE_16;
6779
6780 return port_ltp;
6781}
6782
6783/*
6784 * Our neighbor has indicated that we are allowed to act as a fabric
6785 * manager, so place the full management partition key in the second
6786 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6787 * that we should already have the limited management partition key in
6788 * array element 1, and also that the port is not yet up when
6789 * add_full_mgmt_pkey() is invoked.
6790 */
6791static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6792{
6793 struct hfi1_devdata *dd = ppd->dd;
6794
Dean Luick87645222015-12-01 15:38:21 -05006795	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6796 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6797 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6798 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006799 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6800 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6801}
6802
6803/*
6804 * Convert the given link width to the OPA link width bitmask.
6805 */
6806static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6807{
6808 switch (width) {
6809 case 0:
6810 /*
6811 * Simulator and quick linkup do not set the width.
6812 * Just set it to 4x without complaint.
6813 */
6814 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6815 return OPA_LINK_WIDTH_4X;
6816 return 0; /* no lanes up */
6817 case 1: return OPA_LINK_WIDTH_1X;
6818 case 2: return OPA_LINK_WIDTH_2X;
6819 case 3: return OPA_LINK_WIDTH_3X;
6820 default:
6821 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6822 __func__, width);
6823 /* fall through */
6824 case 4: return OPA_LINK_WIDTH_4X;
6825 }
6826}
6827
6828/*
6829 * Do a population count on the bottom nibble.
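 * e.g. (illustrative) an enable mask of 0xb (lanes 0, 1, and 3) counts as 3.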
6830 */
6831static const u8 bit_counts[16] = {
6832 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6833};
6834static inline u8 nibble_to_count(u8 nibble)
6835{
6836 return bit_counts[nibble & 0xf];
6837}
6838
6839/*
6840 * Read the active lane information from the 8051 registers and return
6841 * their widths.
6842 *
6843 * Active lane information is found in these 8051 registers:
6844 * enable_lane_tx
6845 * enable_lane_rx
6846 */
6847static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6848 u16 *rx_width)
6849{
6850 u16 tx, rx;
6851 u8 enable_lane_rx;
6852 u8 enable_lane_tx;
6853 u8 tx_polarity_inversion;
6854 u8 rx_polarity_inversion;
6855 u8 max_rate;
6856
6857 /* read the active lanes */
6858 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6859 &rx_polarity_inversion, &max_rate);
6860 read_local_lni(dd, &enable_lane_rx);
6861
6862 /* convert to counts */
6863 tx = nibble_to_count(enable_lane_tx);
6864 rx = nibble_to_count(enable_lane_rx);
6865
6866 /*
6867 * Set link_speed_active here, overriding what was set in
6868 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6869 * set the max_rate field in handle_verify_cap until v0.19.
6870 */
6871 if ((dd->icode == ICODE_RTL_SILICON)
6872 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6873 /* max_rate: 0 = 12.5G, 1 = 25G */
6874 switch (max_rate) {
6875 case 0:
6876 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6877 break;
6878 default:
6879 dd_dev_err(dd,
6880 "%s: unexpected max rate %d, using 25Gb\n",
6881 __func__, (int)max_rate);
6882 /* fall through */
6883 case 1:
6884 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6885 break;
6886 }
6887 }
6888
6889 dd_dev_info(dd,
6890 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6891 enable_lane_tx, tx, enable_lane_rx, rx);
6892 *tx_width = link_width_to_bits(dd, tx);
6893 *rx_width = link_width_to_bits(dd, rx);
6894}
6895
6896/*
6897 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6898 * Valid after the end of VerifyCap and during LinkUp. Does not change
6899 * after link up. I.e. look elsewhere for downgrade information.
6900 *
6901 * Bits are:
6902 * + bits [7:4] contain the number of active transmitters
6903 * + bits [3:0] contain the number of active receivers
6904 * These are numbers 1 through 4 and can be different values if the
6905 * link is asymmetric.
6906 *
6907 * verify_cap_local_fm_link_width[0] retains its original value.
6908 */
6909static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6910 u16 *rx_width)
6911{
6912 u16 widths, tx, rx;
6913 u8 misc_bits, local_flags;
6914 u16 active_tx, active_rx;
6915
6916 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6917 tx = widths >> 12;
6918 rx = (widths >> 8) & 0xf;
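	/* e.g. (illustrative) widths == 0x4300 decodes to tx = 4, rx = 3 */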
6919
6920 *tx_width = link_width_to_bits(dd, tx);
6921 *rx_width = link_width_to_bits(dd, rx);
6922
6923 /* print the active widths */
6924 get_link_widths(dd, &active_tx, &active_rx);
6925}
6926
6927/*
6928 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6929 * hardware information when the link first comes up.
6930 *
6931 * The link width is not available until after VerifyCap.AllFramesReceived
6932 * (the trigger for handle_verify_cap), so this is outside that routine
6933 * and should be called when the 8051 signals linkup.
6934 */
6935void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6936{
6937 u16 tx_width, rx_width;
6938
6939 /* get end-of-LNI link widths */
6940 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6941
6942 /* use tx_width as the link is supposed to be symmetric on link up */
6943 ppd->link_width_active = tx_width;
6944 /* link width downgrade active (LWD.A) starts out matching LW.A */
6945 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6946 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6947 /* per OPA spec, on link up LWD.E resets to LWD.S */
6948 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 6949	/* cache the active egress rate (units of 10^6 bits/sec) */
6950 ppd->current_egress_rate = active_egress_rate(ppd);
6951}
6952
6953/*
6954 * Handle a verify capabilities interrupt from the 8051.
6955 *
6956 * This is a work-queue function outside of the interrupt.
6957 */
6958void handle_verify_cap(struct work_struct *work)
6959{
6960 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6961 link_vc_work);
6962 struct hfi1_devdata *dd = ppd->dd;
6963 u64 reg;
6964 u8 power_management;
 6965	u8 continuous;
6966 u8 vcu;
6967 u8 vau;
6968 u8 z;
6969 u16 vl15buf;
6970 u16 link_widths;
6971 u16 crc_mask;
6972 u16 crc_val;
6973 u16 device_id;
6974 u16 active_tx, active_rx;
6975 u8 partner_supported_crc;
6976 u8 remote_tx_rate;
6977 u8 device_rev;
6978
6979 set_link_state(ppd, HLS_VERIFY_CAP);
6980
6981 lcb_shutdown(dd, 0);
6982 adjust_lcb_for_fpga_serdes(dd);
6983
6984 /*
6985 * These are now valid:
6986 * remote VerifyCap fields in the general LNI config
6987 * CSR DC8051_STS_REMOTE_GUID
6988 * CSR DC8051_STS_REMOTE_NODE_TYPE
6989 * CSR DC8051_STS_REMOTE_FM_SECURITY
6990 * CSR DC8051_STS_REMOTE_PORT_NO
6991 */
6992
 6993	read_vc_remote_phy(dd, &power_management, &continuous);
6994 read_vc_remote_fabric(
6995 dd,
6996 &vau,
6997 &z,
6998 &vcu,
6999 &vl15buf,
7000 &partner_supported_crc);
7001 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7002 read_remote_device_id(dd, &device_id, &device_rev);
7003 /*
7004 * And the 'MgmtAllowed' information, which is exchanged during
 7005	 * LNI, is also available at this point.
7006 */
7007 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7008 /* print the active widths */
7009 get_link_widths(dd, &active_tx, &active_rx);
7010 dd_dev_info(dd,
7011 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
 7012		    (int)power_management, (int)continuous);
7013 dd_dev_info(dd,
7014 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7015 (int)vau,
7016 (int)z,
7017 (int)vcu,
7018 (int)vl15buf,
7019 (int)partner_supported_crc);
7020 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7021 (u32)remote_tx_rate, (u32)link_widths);
7022 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7023 (u32)device_id, (u32)device_rev);
7024 /*
7025 * The peer vAU value just read is the peer receiver value. HFI does
7026 * not support a transmit vAU of 0 (AU == 8). We advertised that
7027 * with Z=1 in the fabric capabilities sent to the peer. The peer
7028 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7029 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7030 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7031 * subject to the Z value exception.
7032 */
7033 if (vau == 0)
7034 vau = 1;
7035 set_up_vl15(dd, vau, vl15buf);
7036
7037 /* set up the LCB CRC mode */
7038 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7039
7040 /* order is important: use the lowest bit in common */
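	/* e.g. (illustrative) crc_mask == (CAP_CRC_14B | CAP_CRC_48B) selects LCB_CRC_14B */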
7041 if (crc_mask & CAP_CRC_14B)
7042 crc_val = LCB_CRC_14B;
7043 else if (crc_mask & CAP_CRC_48B)
7044 crc_val = LCB_CRC_48B;
7045 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7046 crc_val = LCB_CRC_12B_16B_PER_LANE;
7047 else
7048 crc_val = LCB_CRC_16B;
7049
7050 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7051 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7052 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7053
7054 /* set (14b only) or clear sideband credit */
7055 reg = read_csr(dd, SEND_CM_CTRL);
7056 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7057 write_csr(dd, SEND_CM_CTRL,
7058 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7059 } else {
7060 write_csr(dd, SEND_CM_CTRL,
7061 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7062 }
7063
7064 ppd->link_speed_active = 0; /* invalid value */
7065 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7066 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7067 switch (remote_tx_rate) {
7068 case 0:
7069 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7070 break;
7071 case 1:
7072 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7073 break;
7074 }
7075 } else {
7076 /* actual rate is highest bit of the ANDed rates */
7077 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7078
7079 if (rate & 2)
7080 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7081 else if (rate & 1)
7082 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7083 }
7084 if (ppd->link_speed_active == 0) {
7085 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7086 __func__, (int)remote_tx_rate);
7087 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7088 }
7089
7090 /*
7091 * Cache the values of the supported, enabled, and active
7092 * LTP CRC modes to return in 'portinfo' queries. But the bit
7093 * flags that are returned in the portinfo query differ from
7094 * what's in the link_crc_mask, crc_sizes, and crc_val
7095 * variables. Convert these here.
7096 */
7097 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7098 /* supported crc modes */
7099 ppd->port_ltp_crc_mode |=
7100 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7101 /* enabled crc modes */
7102 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7103 /* active crc mode */
7104
7105 /* set up the remote credit return table */
7106 assign_remote_cm_au_table(dd, vcu);
7107
7108 /*
7109 * The LCB is reset on entry to handle_verify_cap(), so this must
7110 * be applied on every link up.
7111 *
7112 * Adjust LCB error kill enable to kill the link if
7113 * these RBUF errors are seen:
7114 * REPLAY_BUF_MBE_SMASK
7115 * FLIT_INPUT_BUF_MBE_SMASK
7116 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007117 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007118 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7119 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7120 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7121 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7122 }
7123
7124 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7125 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7126
7127 /* give 8051 access to the LCB CSRs */
7128 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7129 set_8051_lcb_access(dd);
7130
7131 ppd->neighbor_guid =
7132 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7133 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7134 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7135 ppd->neighbor_type =
7136 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7137 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7138 ppd->neighbor_fm_security =
7139 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7140 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7141 dd_dev_info(dd,
7142 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7143 ppd->neighbor_guid, ppd->neighbor_type,
7144 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7145 if (ppd->mgmt_allowed)
7146 add_full_mgmt_pkey(ppd);
7147
7148 /* tell the 8051 to go to LinkUp */
7149 set_link_state(ppd, HLS_GOING_UP);
7150}
7151
7152/*
7153 * Apply the link width downgrade enabled policy against the current active
7154 * link widths.
7155 *
7156 * Called when the enabled policy changes or the active link widths change.
7157 */
7158void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7159{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007160 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007161 int tries;
7162 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007163 u16 tx, rx;
7164
Dean Luick323fd782015-11-16 21:59:24 -05007165 /* use the hls lock to avoid a race with actual link up */
7166 tries = 0;
7167retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007168 mutex_lock(&ppd->hls_lock);
7169 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007170 if (!(ppd->host_link_state & HLS_UP)) {
 7171		/* still going up - wait and retry */
7172 if (ppd->host_link_state & HLS_GOING_UP) {
7173 if (++tries < 1000) {
7174 mutex_unlock(&ppd->hls_lock);
7175 usleep_range(100, 120); /* arbitrary */
7176 goto retry;
7177 }
7178 dd_dev_err(ppd->dd,
7179 "%s: giving up waiting for link state change\n",
7180 __func__);
7181 }
7182 goto done;
7183 }
7184
7185 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007186
7187 if (refresh_widths) {
7188 get_link_widths(ppd->dd, &tx, &rx);
7189 ppd->link_width_downgrade_tx_active = tx;
7190 ppd->link_width_downgrade_rx_active = rx;
7191 }
7192
7193 if (lwde == 0) {
7194 /* downgrade is disabled */
7195
7196 /* bounce if not at starting active width */
7197 if ((ppd->link_width_active !=
7198 ppd->link_width_downgrade_tx_active)
7199 || (ppd->link_width_active !=
7200 ppd->link_width_downgrade_rx_active)) {
7201 dd_dev_err(ppd->dd,
7202 "Link downgrade is disabled and link has downgraded, downing link\n");
7203 dd_dev_err(ppd->dd,
7204 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7205 ppd->link_width_active,
7206 ppd->link_width_downgrade_tx_active,
7207 ppd->link_width_downgrade_rx_active);
7208 do_bounce = 1;
7209 }
7210 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7211 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7212 /* Tx or Rx is outside the enabled policy */
7213 dd_dev_err(ppd->dd,
7214 "Link is outside of downgrade allowed, downing link\n");
7215 dd_dev_err(ppd->dd,
7216 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7217 lwde,
7218 ppd->link_width_downgrade_tx_active,
7219 ppd->link_width_downgrade_rx_active);
7220 do_bounce = 1;
7221 }
7222
Dean Luick323fd782015-11-16 21:59:24 -05007223done:
7224 mutex_unlock(&ppd->hls_lock);
7225
Mike Marciniszyn77241052015-07-30 15:17:43 -04007226 if (do_bounce) {
7227 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7228 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7229 set_link_state(ppd, HLS_DN_OFFLINE);
7230 start_link(ppd);
7231 }
7232}
7233
7234/*
7235 * Handle a link downgrade interrupt from the 8051.
7236 *
7237 * This is a work-queue function outside of the interrupt.
7238 */
7239void handle_link_downgrade(struct work_struct *work)
7240{
7241 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7242 link_downgrade_work);
7243
7244 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7245 apply_link_downgrade_policy(ppd, 1);
7246}
7247
7248static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7249{
7250 return flag_string(buf, buf_len, flags, dcc_err_flags,
7251 ARRAY_SIZE(dcc_err_flags));
7252}
7253
7254static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7255{
7256 return flag_string(buf, buf_len, flags, lcb_err_flags,
7257 ARRAY_SIZE(lcb_err_flags));
7258}
7259
7260static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7261{
7262 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7263 ARRAY_SIZE(dc8051_err_flags));
7264}
7265
7266static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7267{
7268 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7269 ARRAY_SIZE(dc8051_info_err_flags));
7270}
7271
7272static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7273{
7274 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7275 ARRAY_SIZE(dc8051_info_host_msg_flags));
7276}
7277
7278static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7279{
7280 struct hfi1_pportdata *ppd = dd->pport;
7281 u64 info, err, host_msg;
7282 int queue_link_down = 0;
7283 char buf[96];
7284
7285 /* look at the flags */
7286 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7287 /* 8051 information set by firmware */
7288 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7289 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7290 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7291 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7292 host_msg = (info >>
7293 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7294 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7295
7296 /*
7297 * Handle error flags.
7298 */
7299 if (err & FAILED_LNI) {
7300 /*
7301 * LNI error indications are cleared by the 8051
7302 * only when starting polling. Only pay attention
7303 * to them when in the states that occur during
7304 * LNI.
7305 */
7306 if (ppd->host_link_state
7307 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7308 queue_link_down = 1;
7309 dd_dev_info(dd, "Link error: %s\n",
7310 dc8051_info_err_string(buf,
7311 sizeof(buf),
7312 err & FAILED_LNI));
7313 }
7314 err &= ~(u64)FAILED_LNI;
7315 }
Dean Luick6d014532015-12-01 15:38:23 -05007316		/* unknown frames can happen during LNI, just count */
7317 if (err & UNKNOWN_FRAME) {
7318 ppd->unknown_frame_count++;
7319 err &= ~(u64)UNKNOWN_FRAME;
7320 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007321 if (err) {
7322 /* report remaining errors, but do not do anything */
7323 dd_dev_err(dd, "8051 info error: %s\n",
7324 dc8051_info_err_string(buf, sizeof(buf), err));
7325 }
7326
7327 /*
7328 * Handle host message flags.
7329 */
7330 if (host_msg & HOST_REQ_DONE) {
7331 /*
7332 * Presently, the driver does a busy wait for
7333 * host requests to complete. This is only an
7334 * informational message.
7335 * NOTE: The 8051 clears the host message
7336 * information *on the next 8051 command*.
7337 * Therefore, when linkup is achieved,
7338 * this flag will still be set.
7339 */
7340 host_msg &= ~(u64)HOST_REQ_DONE;
7341 }
7342 if (host_msg & BC_SMA_MSG) {
7343 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7344 host_msg &= ~(u64)BC_SMA_MSG;
7345 }
7346 if (host_msg & LINKUP_ACHIEVED) {
7347 dd_dev_info(dd, "8051: Link up\n");
7348 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7349 host_msg &= ~(u64)LINKUP_ACHIEVED;
7350 }
7351 if (host_msg & EXT_DEVICE_CFG_REQ) {
7352 handle_8051_request(dd);
7353 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7354 }
7355 if (host_msg & VERIFY_CAP_FRAME) {
7356 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7357 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7358 }
7359 if (host_msg & LINK_GOING_DOWN) {
7360 const char *extra = "";
7361 /* no downgrade action needed if going down */
7362 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7363 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7364 extra = " (ignoring downgrade)";
7365 }
7366 dd_dev_info(dd, "8051: Link down%s\n", extra);
7367 queue_link_down = 1;
7368 host_msg &= ~(u64)LINK_GOING_DOWN;
7369 }
7370 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7371 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7372 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7373 }
7374 if (host_msg) {
7375 /* report remaining messages, but do not do anything */
7376 dd_dev_info(dd, "8051 info host message: %s\n",
7377 dc8051_info_host_msg_string(buf, sizeof(buf),
7378 host_msg));
7379 }
7380
7381 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7382 }
7383 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7384 /*
7385 * Lost the 8051 heartbeat. If this happens, we
7386 * receive constant interrupts about it. Disable
7387 * the interrupt after the first.
7388 */
7389 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7390 write_csr(dd, DC_DC8051_ERR_EN,
7391 read_csr(dd, DC_DC8051_ERR_EN)
7392 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7393
7394 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7395 }
7396 if (reg) {
7397 /* report the error, but do not do anything */
7398 dd_dev_err(dd, "8051 error: %s\n",
7399 dc8051_err_string(buf, sizeof(buf), reg));
7400 }
7401
7402 if (queue_link_down) {
7403 /* if the link is already going down or disabled, do not
7404 * queue another */
7405 if ((ppd->host_link_state
7406 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7407 || ppd->link_enabled == 0) {
7408 dd_dev_info(dd, "%s: not queuing link down\n",
7409 __func__);
7410 } else {
7411 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7412 }
7413 }
7414}
7415
7416static const char * const fm_config_txt[] = {
7417[0] =
7418 "BadHeadDist: Distance violation between two head flits",
7419[1] =
7420 "BadTailDist: Distance violation between two tail flits",
7421[2] =
7422 "BadCtrlDist: Distance violation between two credit control flits",
7423[3] =
7424 "BadCrdAck: Credits return for unsupported VL",
7425[4] =
7426 "UnsupportedVLMarker: Received VL Marker",
7427[5] =
7428 "BadPreempt: Exceeded the preemption nesting level",
7429[6] =
7430 "BadControlFlit: Received unsupported control flit",
7431/* no 7 */
7432[8] =
7433 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7434};
7435
7436static const char * const port_rcv_txt[] = {
7437[1] =
7438 "BadPktLen: Illegal PktLen",
7439[2] =
7440 "PktLenTooLong: Packet longer than PktLen",
7441[3] =
7442 "PktLenTooShort: Packet shorter than PktLen",
7443[4] =
7444 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7445[5] =
7446 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7447[6] =
7448 "BadL2: Illegal L2 opcode",
7449[7] =
7450 "BadSC: Unsupported SC",
7451[9] =
7452 "BadRC: Illegal RC",
7453[11] =
7454 "PreemptError: Preempting with same VL",
7455[12] =
7456 "PreemptVL15: Preempting a VL15 packet",
7457};
7458
7459#define OPA_LDR_FMCONFIG_OFFSET 16
7460#define OPA_LDR_PORTRCV_OFFSET 0
7461static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7462{
7463 u64 info, hdr0, hdr1;
7464 const char *extra;
7465 char buf[96];
7466 struct hfi1_pportdata *ppd = dd->pport;
7467 u8 lcl_reason = 0;
7468 int do_bounce = 0;
7469
7470 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7471 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7472 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7473 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7474 /* set status bit */
7475 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7476 }
7477 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7478 }
7479
7480 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7481 struct hfi1_pportdata *ppd = dd->pport;
7482 /* this counter saturates at (2^32) - 1 */
7483 if (ppd->link_downed < (u32)UINT_MAX)
7484 ppd->link_downed++;
7485 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7486 }
7487
7488 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7489 u8 reason_valid = 1;
7490
7491 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7492 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7493 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7494 /* set status bit */
7495 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7496 }
7497 switch (info) {
7498 case 0:
7499 case 1:
7500 case 2:
7501 case 3:
7502 case 4:
7503 case 5:
7504 case 6:
7505 extra = fm_config_txt[info];
7506 break;
7507 case 8:
7508 extra = fm_config_txt[info];
7509 if (ppd->port_error_action &
7510 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7511 do_bounce = 1;
7512 /*
7513 * lcl_reason cannot be derived from info
7514 * for this error
7515 */
7516 lcl_reason =
7517 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7518 }
7519 break;
7520 default:
7521 reason_valid = 0;
7522 snprintf(buf, sizeof(buf), "reserved%lld", info);
7523 extra = buf;
7524 break;
7525 }
7526
7527 if (reason_valid && !do_bounce) {
7528 do_bounce = ppd->port_error_action &
7529 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
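			/* e.g. (illustrative) info 2 (BadCtrlDist) tests bit 18 of port_error_action */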
7530 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7531 }
7532
7533 /* just report this */
7534 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7535 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7536 }
7537
7538 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7539 u8 reason_valid = 1;
7540
7541 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7542 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7543 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7544 if (!(dd->err_info_rcvport.status_and_code &
7545 OPA_EI_STATUS_SMASK)) {
7546 dd->err_info_rcvport.status_and_code =
7547 info & OPA_EI_CODE_SMASK;
7548 /* set status bit */
7549 dd->err_info_rcvport.status_and_code |=
7550 OPA_EI_STATUS_SMASK;
7551 /* save first 2 flits in the packet that caused
7552 * the error */
7553 dd->err_info_rcvport.packet_flit1 = hdr0;
7554 dd->err_info_rcvport.packet_flit2 = hdr1;
7555 }
7556 switch (info) {
7557 case 1:
7558 case 2:
7559 case 3:
7560 case 4:
7561 case 5:
7562 case 6:
7563 case 7:
7564 case 9:
7565 case 11:
7566 case 12:
7567 extra = port_rcv_txt[info];
7568 break;
7569 default:
7570 reason_valid = 0;
7571 snprintf(buf, sizeof(buf), "reserved%lld", info);
7572 extra = buf;
7573 break;
7574 }
7575
7576 if (reason_valid && !do_bounce) {
7577 do_bounce = ppd->port_error_action &
7578 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7579 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7580 }
7581
7582 /* just report this */
7583 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7584 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7585 hdr0, hdr1);
7586
7587 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7588 }
7589
7590 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7591 /* informative only */
7592 dd_dev_info(dd, "8051 access to LCB blocked\n");
7593 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7594 }
7595 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7596 /* informative only */
7597 dd_dev_info(dd, "host access to LCB blocked\n");
7598 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7599 }
7600
7601 /* report any remaining errors */
7602 if (reg)
7603 dd_dev_info(dd, "DCC Error: %s\n",
7604 dcc_err_string(buf, sizeof(buf), reg));
7605
7606 if (lcl_reason == 0)
7607 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7608
7609 if (do_bounce) {
7610 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7611 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7612 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7613 }
7614}
7615
7616static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7617{
7618 char buf[96];
7619
7620 dd_dev_info(dd, "LCB Error: %s\n",
7621 lcb_err_string(buf, sizeof(buf), reg));
7622}
7623
7624/*
7625 * CCE block DC interrupt. Source is < 8.
7626 */
7627static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7628{
7629 const struct err_reg_info *eri = &dc_errs[source];
7630
7631 if (eri->handler) {
7632 interrupt_clear_down(dd, 0, eri);
7633 } else if (source == 3 /* dc_lbm_int */) {
7634 /*
7635 * This indicates that a parity error has occurred on the
7636 * address/control lines presented to the LBM. The error
7637 * is a single pulse, there is no associated error flag,
7638 * and it is non-maskable. This is because if a parity
7639 * error occurs on the request the request is dropped.
7640 * This should never occur, but it is nice to know if it
7641 * ever does.
7642 */
7643 dd_dev_err(dd, "Parity error in DC LBM block\n");
7644 } else {
7645 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7646 }
7647}
7648
7649/*
7650 * TX block send credit interrupt. Source is < 160.
7651 */
7652static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7653{
7654 sc_group_release_update(dd, source);
7655}
7656
7657/*
7658 * TX block SDMA interrupt. Source is < 48.
7659 *
7660 * SDMA interrupts are grouped by type:
7661 *
7662 * 0 - N-1 = SDma
7663 * N - 2N-1 = SDmaProgress
7664 * 2N - 3N-1 = SDmaIdle
7665 */
7666static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7667{
7668 /* what interrupt */
7669 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7670 /* which engine */
7671 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
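	/* e.g. assuming TXE_NUM_SDMA_ENGINES == 16: source 20 -> what 1 (SDmaProgress) on engine 4 */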
7672
7673#ifdef CONFIG_SDMA_VERBOSITY
7674 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7675 slashstrip(__FILE__), __LINE__, __func__);
7676 sdma_dumpstate(&dd->per_sdma[which]);
7677#endif
7678
7679 if (likely(what < 3 && which < dd->num_sdma)) {
7680 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7681 } else {
7682 /* should not happen */
7683 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7684 }
7685}
7686
7687/*
7688 * RX block receive available interrupt. Source is < 160.
7689 */
7690static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7691{
7692 struct hfi1_ctxtdata *rcd;
7693 char *err_detail;
7694
7695 if (likely(source < dd->num_rcv_contexts)) {
7696 rcd = dd->rcd[source];
7697 if (rcd) {
7698 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007699 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007700 else
7701 handle_user_interrupt(rcd);
7702 return; /* OK */
7703 }
7704 /* received an interrupt, but no rcd */
7705 err_detail = "dataless";
7706 } else {
7707 /* received an interrupt, but are not using that context */
7708 err_detail = "out of range";
7709 }
7710 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7711 err_detail, source);
7712}
7713
7714/*
7715 * RX block receive urgent interrupt. Source is < 160.
7716 */
7717static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7718{
7719 struct hfi1_ctxtdata *rcd;
7720 char *err_detail;
7721
7722 if (likely(source < dd->num_rcv_contexts)) {
7723 rcd = dd->rcd[source];
7724 if (rcd) {
7725 /* only pay attention to user urgent interrupts */
7726 if (source >= dd->first_user_ctxt)
7727 handle_user_interrupt(rcd);
7728 return; /* OK */
7729 }
7730 /* received an interrupt, but no rcd */
7731 err_detail = "dataless";
7732 } else {
7733 /* received an interrupt, but are not using that context */
7734 err_detail = "out of range";
7735 }
7736 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7737 err_detail, source);
7738}
7739
7740/*
7741 * Reserved range interrupt. Should not be called in normal operation.
7742 */
7743static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7744{
7745 char name[64];
7746
7747 dd_dev_err(dd, "unexpected %s interrupt\n",
7748 is_reserved_name(name, sizeof(name), source));
7749}
7750
7751static const struct is_table is_table[] = {
7752/* start end
7753 name func interrupt func */
7754{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7755 is_misc_err_name, is_misc_err_int },
7756{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7757 is_sdma_eng_err_name, is_sdma_eng_err_int },
7758{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7759 is_sendctxt_err_name, is_sendctxt_err_int },
7760{ IS_SDMA_START, IS_SDMA_END,
7761 is_sdma_eng_name, is_sdma_eng_int },
7762{ IS_VARIOUS_START, IS_VARIOUS_END,
7763 is_various_name, is_various_int },
7764{ IS_DC_START, IS_DC_END,
7765 is_dc_name, is_dc_int },
7766{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7767 is_rcv_avail_name, is_rcv_avail_int },
7768{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7769 is_rcv_urgent_name, is_rcv_urgent_int },
7770{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7771 is_send_credit_name, is_send_credit_int},
7772{ IS_RESERVED_START, IS_RESERVED_END,
7773 is_reserved_name, is_reserved_int},
7774};
7775
7776/*
7777 * Interrupt source interrupt - called when the given source has an interrupt.
7778 * Source is a bit index into an array of 64-bit integers.
7779 */
7780static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7781{
7782 const struct is_table *entry;
7783
7784 /* avoids a double compare by walking the table in-order */
7785 for (entry = &is_table[0]; entry->is_name; entry++) {
7786 if (source < entry->end) {
7787 trace_hfi1_interrupt(dd, entry, source);
7788 entry->is_int(dd, source - entry->start);
7789 return;
7790 }
7791 }
7792 /* fell off the end */
7793 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7794}
7795
7796/*
7797 * General interrupt handler. This is able to correctly handle
7798 * all interrupts in case INTx is used.
7799 */
7800static irqreturn_t general_interrupt(int irq, void *data)
7801{
7802 struct hfi1_devdata *dd = data;
7803 u64 regs[CCE_NUM_INT_CSRS];
7804 u32 bit;
7805 int i;
7806
7807 this_cpu_inc(*dd->int_counter);
7808
7809 /* phase 1: scan and clear all handled interrupts */
7810 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7811 if (dd->gi_mask[i] == 0) {
7812 regs[i] = 0; /* used later */
7813 continue;
7814 }
7815 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7816 dd->gi_mask[i];
7817 /* only clear if anything is set */
7818 if (regs[i])
7819 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7820 }
7821
7822 /* phase 2: call the appropriate handler */
7823 for_each_set_bit(bit, (unsigned long *)&regs[0],
7824 CCE_NUM_INT_CSRS*64) {
7825 is_interrupt(dd, bit);
7826 }
7827
7828 return IRQ_HANDLED;
7829}
7830
7831static irqreturn_t sdma_interrupt(int irq, void *data)
7832{
7833 struct sdma_engine *sde = data;
7834 struct hfi1_devdata *dd = sde->dd;
7835 u64 status;
7836
7837#ifdef CONFIG_SDMA_VERBOSITY
7838 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7839 slashstrip(__FILE__), __LINE__, __func__);
7840 sdma_dumpstate(sde);
7841#endif
7842
7843 this_cpu_inc(*dd->int_counter);
7844
7845 /* This read_csr is really bad in the hot path */
7846 status = read_csr(dd,
7847 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7848 & sde->imask;
7849 if (likely(status)) {
7850 /* clear the interrupt(s) */
7851 write_csr(dd,
7852 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7853 status);
7854
7855 /* handle the interrupt(s) */
7856 sdma_engine_interrupt(sde, status);
7857 } else
7858 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7859 sde->this_idx);
7860
7861 return IRQ_HANDLED;
7862}
7863
7864/*
Dean Luickf4f30031c2015-10-26 10:28:44 -04007865 * Clear the receive interrupt, forcing the write and making sure
7866 * we have data from the chip, pushing everything in front of it
7867 * back to the host.
7868 */
7869static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7870{
7871 struct hfi1_devdata *dd = rcd->dd;
7872 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7873
7874 mmiowb(); /* make sure everything before is written */
7875 write_csr(dd, addr, rcd->imask);
7876 /* force the above write on the chip and get a value back */
7877 (void)read_csr(dd, addr);
7878}
7879
7880/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05007881void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007882{
7883 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7884}
7885
7886/* return non-zero if a packet is present */
7887static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7888{
7889 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7890 return (rcd->seq_cnt ==
7891 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7892
7893 /* else is RDMA rtail */
7894 return (rcd->head != get_rcvhdrtail(rcd));
7895}
7896
7897/*
7898 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7899 * This routine will try to handle packets immediately (latency), but if
 7900	 * it finds too many, it will invoke the thread handler (bandwidth). The
 7901	 * chip receive interrupt is *not* cleared down until this or the thread (if
7902 * invoked) is finished. The intent is to avoid extra interrupts while we
7903 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04007904 */
7905static irqreturn_t receive_context_interrupt(int irq, void *data)
7906{
7907 struct hfi1_ctxtdata *rcd = data;
7908 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04007909 int disposition;
7910 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007911
7912 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7913 this_cpu_inc(*dd->int_counter);
7914
Dean Luickf4f30031c2015-10-26 10:28:44 -04007915 /* receive interrupt remains blocked while processing packets */
7916 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007917
Dean Luickf4f30031c2015-10-26 10:28:44 -04007918 /*
7919 * Too many packets were seen while processing packets in this
7920 * IRQ handler. Invoke the handler thread. The receive interrupt
7921 * remains blocked.
7922 */
7923 if (disposition == RCV_PKT_LIMIT)
7924 return IRQ_WAKE_THREAD;
7925
7926 /*
7927 * The packet processor detected no more packets. Clear the receive
 7928	 * interrupt and recheck for a packet that may have arrived
7929 * after the previous check and interrupt clear. If a packet arrived,
7930 * force another interrupt.
7931 */
7932 clear_recv_intr(rcd);
7933 present = check_packet_present(rcd);
7934 if (present)
7935 force_recv_intr(rcd);
7936
7937 return IRQ_HANDLED;
7938}
7939
7940/*
7941 * Receive packet thread handler. This expects to be invoked with the
7942 * receive interrupt still blocked.
7943 */
7944static irqreturn_t receive_context_thread(int irq, void *data)
7945{
7946 struct hfi1_ctxtdata *rcd = data;
7947 int present;
7948
7949 /* receive interrupt is still blocked from the IRQ handler */
7950 (void)rcd->do_interrupt(rcd, 1);
7951
7952 /*
7953 * The packet processor will only return if it detected no more
7954 * packets. Hold IRQs here so we can safely clear the interrupt and
7955 * recheck for a packet that may have arrived after the previous
7956 * check and the interrupt clear. If a packet arrived, force another
7957 * interrupt.
7958 */
7959 local_irq_disable();
7960 clear_recv_intr(rcd);
7961 present = check_packet_present(rcd);
7962 if (present)
7963 force_recv_intr(rcd);
7964 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04007965
7966 return IRQ_HANDLED;
7967}
7968
7969/* ========================================================================= */
7970
7971u32 read_physical_state(struct hfi1_devdata *dd)
7972{
7973 u64 reg;
7974
7975 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
7976 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
7977 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
7978}
7979
Jim Snowfb9036d2016-01-11 18:32:21 -05007980u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04007981{
7982 u64 reg;
7983
7984 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7985 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
7986 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
7987}
7988
7989static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
7990{
7991 u64 reg;
7992
7993 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
7994 /* clear current state, set new state */
7995 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
7996 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
7997 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
7998}
7999
8000/*
8001 * Use the 8051 to read a LCB CSR.
8002 */
8003static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8004{
8005 u32 regno;
8006 int ret;
8007
8008 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8009 if (acquire_lcb_access(dd, 0) == 0) {
8010 *data = read_csr(dd, addr);
8011 release_lcb_access(dd, 0);
8012 return 0;
8013 }
8014 return -EBUSY;
8015 }
8016
8017 /* register is an index of LCB registers: (offset - base) / 8 */
8018 regno = (addr - DC_LCB_CFG_RUN) >> 3;
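	/* e.g. (illustrative) addr == DC_LCB_CFG_RUN + 0x18 gives regno 3 */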
8019 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8020 if (ret != HCMD_SUCCESS)
8021 return -EBUSY;
8022 return 0;
8023}
8024
8025/*
8026 * Read an LCB CSR. Access may not be in host control, so check.
8027 * Return 0 on success, -EBUSY on failure.
8028 */
8029int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8030{
8031 struct hfi1_pportdata *ppd = dd->pport;
8032
8033 /* if up, go through the 8051 for the value */
8034 if (ppd->host_link_state & HLS_UP)
8035 return read_lcb_via_8051(dd, addr, data);
8036 /* if going up or down, no access */
8037 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8038 return -EBUSY;
8039 /* otherwise, host has access */
8040 *data = read_csr(dd, addr);
8041 return 0;
8042}
8043
8044/*
8045 * Use the 8051 to write a LCB CSR.
8046 */
8047static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8048{
Dean Luick3bf40d62015-11-06 20:07:04 -05008049 u32 regno;
8050 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008051
Dean Luick3bf40d62015-11-06 20:07:04 -05008052 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8053 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8054 if (acquire_lcb_access(dd, 0) == 0) {
8055 write_csr(dd, addr, data);
8056 release_lcb_access(dd, 0);
8057 return 0;
8058 }
8059 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008060 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008061
8062 /* register is an index of LCB registers: (offset - base) / 8 */
8063 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8064 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8065 if (ret != HCMD_SUCCESS)
8066 return -EBUSY;
8067 return 0;
8068}
8069
8070/*
8071 * Write an LCB CSR. Access may not be in host control, so check.
8072 * Return 0 on success, -EBUSY on failure.
8073 */
8074int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8075{
8076 struct hfi1_pportdata *ppd = dd->pport;
8077
8078 /* if up, go through the 8051 for the value */
8079 if (ppd->host_link_state & HLS_UP)
8080 return write_lcb_via_8051(dd, addr, data);
8081 /* if going up or down, no access */
8082 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8083 return -EBUSY;
8084 /* otherwise, host has access */
8085 write_csr(dd, addr, data);
8086 return 0;
8087}
8088
8089/*
8090 * Returns:
8091 * < 0 = Linux error, not able to get access
8092 * > 0 = 8051 command RETURN_CODE
8093 */
8094static int do_8051_command(
8095 struct hfi1_devdata *dd,
8096 u32 type,
8097 u64 in_data,
8098 u64 *out_data)
8099{
8100 u64 reg, completed;
8101 int return_code;
8102 unsigned long flags;
8103 unsigned long timeout;
8104
8105 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8106
8107 /*
8108 * Alternative to holding the lock for a long time:
8109 * - keep busy wait - have other users bounce off
8110 */
8111 spin_lock_irqsave(&dd->dc8051_lock, flags);
8112
8113 /* We can't send any commands to the 8051 if it's in reset */
8114 if (dd->dc_shutdown) {
8115 return_code = -ENODEV;
8116 goto fail;
8117 }
8118
8119 /*
8120 * If an 8051 host command timed out previously, then the 8051 is
8121 * stuck.
8122 *
8123 * On first timeout, attempt to reset and restart the entire DC
8124 * block (including 8051). (Is this too big of a hammer?)
8125 *
8126 * If the 8051 times out a second time, the reset did not bring it
8127 * back to healthy life. In that case, fail any subsequent commands.
8128 */
8129 if (dd->dc8051_timed_out) {
8130 if (dd->dc8051_timed_out > 1) {
8131 dd_dev_err(dd,
8132 "Previous 8051 host command timed out, skipping command %u\n",
8133 type);
8134 return_code = -ENXIO;
8135 goto fail;
8136 }
8137 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8138 dc_shutdown(dd);
8139 dc_start(dd);
8140 spin_lock_irqsave(&dd->dc8051_lock, flags);
8141 }
8142
8143 /*
8144 * If there is no timeout, then the 8051 command interface is
8145 * waiting for a command.
8146 */
8147
8148 /*
8149 * When writing an LCB CSR, out_data contains the full value
8150 * to be written, while in_data contains the relative LCB
8151 * address in 7:0. Do the work here, rather than in the caller,
8152 * of distributing the write data to where it needs to go:
8153 *
8154 * Write data
8155 * 39:00 -> in_data[47:8]
8156 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8157 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8158 */
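	/*
	 * For example, a write value of 0x1122334455667788 is split by the
	 * code below into in_data[47:8] = 0x4455667788, RETURN_CODE = 0x33
	 * and RSP_DATA = 0x1122.
	 */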
8159 if (type == HCMD_WRITE_LCB_CSR) {
8160 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8161 reg = ((((*out_data) >> 40) & 0xff) <<
8162 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8163 | ((((*out_data) >> 48) & 0xffff) <<
8164 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8165 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8166 }
8167
8168 /*
8169 * Do two writes: the first to stabilize the type and req_data, the
8170 * second to activate.
8171 */
8172 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8173 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8174 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8175 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8176 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8177 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8178 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8179
8180 /* wait for completion, alternate: interrupt */
8181 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8182 while (1) {
8183 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8184 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8185 if (completed)
8186 break;
8187 if (time_after(jiffies, timeout)) {
8188 dd->dc8051_timed_out++;
8189 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8190 if (out_data)
8191 *out_data = 0;
8192 return_code = -ETIMEDOUT;
8193 goto fail;
8194 }
8195 udelay(2);
8196 }
8197
8198 if (out_data) {
8199 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8200 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8201 if (type == HCMD_READ_LCB_CSR) {
8202 /* top 16 bits are in a different register */
8203 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8204 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8205 << (48
8206 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8207 }
8208 }
8209 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8210 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8211 dd->dc8051_timed_out = 0;
8212 /*
8213 * Clear command for next user.
8214 */
8215 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8216
8217fail:
8218 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8219
8220 return return_code;
8221}
8222
8223static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8224{
8225 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8226}
8227
8228static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8229 u8 lane_id, u32 config_data)
8230{
8231 u64 data;
8232 int ret;
8233
8234 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8235 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8236 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8237 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8238 if (ret != HCMD_SUCCESS) {
8239 dd_dev_err(dd,
8240 "load 8051 config: field id %d, lane %d, err %d\n",
8241 (int)field_id, (int)lane_id, ret);
8242 }
8243 return ret;
8244}
8245
8246/*
8247 * Read the 8051 firmware "registers". Use the RAM directly. Always
8248 * set the result, even on error.
8249 * Return 0 on success, -errno on failure
8250 */
8251static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8252 u32 *result)
8253{
8254 u64 big_data;
8255 u32 addr;
8256 int ret;
8257
8258 /* address start depends on the lane_id */
8259 if (lane_id < 4)
8260 addr = (4 * NUM_GENERAL_FIELDS)
8261 + (lane_id * 4 * NUM_LANE_FIELDS);
8262 else
8263 addr = 0;
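	/* a lane_id of 4 or more (such as GENERAL_CONFIG) indexes the
	 * general fields, which sit at the start of the 8051 RAM
	 */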
8264 addr += field_id * 4;
8265
8266 /* read is in 8-byte chunks, hardware will truncate the address down */
8267 ret = read_8051_data(dd, addr, 8, &big_data);
8268
8269 if (ret == 0) {
8270 /* extract the 4 bytes we want */
8271 if (addr & 0x4)
8272 *result = (u32)(big_data >> 32);
8273 else
8274 *result = (u32)big_data;
8275 } else {
8276 *result = 0;
8277 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8278 __func__, lane_id, field_id);
8279 }
8280
8281 return ret;
8282}
8283
8284static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8285 u8 continuous)
8286{
8287 u32 frame;
8288
8289 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8290 | power_management << POWER_MANAGEMENT_SHIFT;
8291 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8292 GENERAL_CONFIG, frame);
8293}
8294
8295static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8296 u16 vl15buf, u8 crc_sizes)
8297{
8298 u32 frame;
8299
8300 frame = (u32)vau << VAU_SHIFT
8301 | (u32)z << Z_SHIFT
8302 | (u32)vcu << VCU_SHIFT
8303 | (u32)vl15buf << VL15BUF_SHIFT
8304 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8305 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8306 GENERAL_CONFIG, frame);
8307}
8308
8309static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8310 u8 *flag_bits, u16 *link_widths)
8311{
8312 u32 frame;
8313
8314 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8315 &frame);
8316 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8317 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8318 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8319}
8320
8321static int write_vc_local_link_width(struct hfi1_devdata *dd,
8322 u8 misc_bits,
8323 u8 flag_bits,
8324 u16 link_widths)
8325{
8326 u32 frame;
8327
8328 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8329 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8330 | (u32)link_widths << LINK_WIDTH_SHIFT;
8331 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8332 frame);
8333}
8334
8335static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8336 u8 device_rev)
8337{
8338 u32 frame;
8339
8340 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8341 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8342 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8343}
8344
8345static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8346 u8 *device_rev)
8347{
8348 u32 frame;
8349
8350 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8351 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8352 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8353 & REMOTE_DEVICE_REV_MASK;
8354}
8355
8356void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8357{
8358 u32 frame;
8359
8360 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8361 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8362 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8363}
8364
8365static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8366 u8 *continuous)
8367{
8368 u32 frame;
8369
8370 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8371 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8372 & POWER_MANAGEMENT_MASK;
8373 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8374 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8375}
8376
8377static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8378 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8379{
8380 u32 frame;
8381
8382 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8383 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8384 *z = (frame >> Z_SHIFT) & Z_MASK;
8385 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8386 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8387 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8388}
8389
8390static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8391 u8 *remote_tx_rate,
8392 u16 *link_widths)
8393{
8394 u32 frame;
8395
8396 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8397 &frame);
8398 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8399 & REMOTE_TX_RATE_MASK;
8400 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8401}
8402
8403static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8404{
8405 u32 frame;
8406
8407 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8408 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8409}
8410
8411static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8412{
8413 u32 frame;
8414
8415 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8416 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8417}
8418
8419static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8420{
8421 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8422}
8423
8424static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8425{
8426 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8427}
8428
8429void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8430{
8431 u32 frame;
8432 int ret;
8433
8434 *link_quality = 0;
8435 if (dd->pport->host_link_state & HLS_UP) {
8436 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8437 &frame);
8438 if (ret == 0)
8439 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8440 & LINK_QUALITY_MASK;
8441 }
8442}
8443
8444static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8445{
8446 u32 frame;
8447
8448 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8449 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8450}
8451
8452static int read_tx_settings(struct hfi1_devdata *dd,
8453 u8 *enable_lane_tx,
8454 u8 *tx_polarity_inversion,
8455 u8 *rx_polarity_inversion,
8456 u8 *max_rate)
8457{
8458 u32 frame;
8459 int ret;
8460
8461 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8462 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8463 & ENABLE_LANE_TX_MASK;
8464 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8465 & TX_POLARITY_INVERSION_MASK;
8466 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8467 & RX_POLARITY_INVERSION_MASK;
8468 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8469 return ret;
8470}
8471
8472static int write_tx_settings(struct hfi1_devdata *dd,
8473 u8 enable_lane_tx,
8474 u8 tx_polarity_inversion,
8475 u8 rx_polarity_inversion,
8476 u8 max_rate)
8477{
8478 u32 frame;
8479
8480 /* no need to mask, all variable sizes match field widths */
8481 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8482 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8483 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8484 | max_rate << MAX_RATE_SHIFT;
8485 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8486}
8487
8488static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8489{
8490 u32 frame, version, prod_id;
8491 int ret, lane;
8492
8493 /* 4 lanes */
8494 for (lane = 0; lane < 4; lane++) {
8495 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8496 if (ret) {
8497 dd_dev_err(
8498 dd,
8499 "Unable to read lane %d firmware details\n",
8500 lane);
8501 continue;
8502 }
8503 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8504 & SPICO_ROM_VERSION_MASK;
8505 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8506 & SPICO_ROM_PROD_ID_MASK;
8507 dd_dev_info(dd,
8508 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8509 lane, version, prod_id);
8510 }
8511}
8512
8513/*
8514 * Read an idle LCB message.
8515 *
8516 * Returns 0 on success, -EINVAL on error
8517 */
8518static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8519{
8520 int ret;
8521
8522 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8523 type, data_out);
8524 if (ret != HCMD_SUCCESS) {
8525 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8526 (u32)type, ret);
8527 return -EINVAL;
8528 }
8529 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8530 /* return only the payload as we already know the type */
8531 *data_out >>= IDLE_PAYLOAD_SHIFT;
8532 return 0;
8533}
8534
8535/*
8536 * Read an idle SMA message. To be done in response to a notification from
8537 * the 8051.
8538 *
8539 * Returns 0 on success, -EINVAL on error
8540 */
8541static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8542{
8543 return read_idle_message(dd,
8544 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8545}
8546
8547/*
8548 * Send an idle LCB message.
8549 *
8550 * Returns 0 on success, -EINVAL on error
8551 */
8552static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8553{
8554 int ret;
8555
8556 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8557 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8558 if (ret != HCMD_SUCCESS) {
8559 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8560 data, ret);
8561 return -EINVAL;
8562 }
8563 return 0;
8564}
8565
8566/*
8567 * Send an idle SMA message.
8568 *
8569 * Returns 0 on success, -EINVAL on error
8570 */
8571int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8572{
8573 u64 data;
8574
8575 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8576 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8577 return send_idle_message(dd, data);
8578}
8579
8580/*
8581 * Initialize the LCB then do a quick link up. This may or may not be
8582 * in loopback.
8583 *
8584 * return 0 on success, -errno on error
8585 */
8586static int do_quick_linkup(struct hfi1_devdata *dd)
8587{
8588 u64 reg;
8589 unsigned long timeout;
8590 int ret;
8591
8592 lcb_shutdown(dd, 0);
8593
8594 if (loopback) {
8595 /* LCB_CFG_LOOPBACK.VAL = 2 */
8596 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8597 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8598 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8599 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8600 }
8601
8602 /* start the LCBs */
8603 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8604 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8605
8606 /* simulator only loopback steps */
8607 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8608 /* LCB_CFG_RUN.EN = 1 */
8609 write_csr(dd, DC_LCB_CFG_RUN,
8610 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8611
8612 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8613 timeout = jiffies + msecs_to_jiffies(10);
8614 while (1) {
8615 reg = read_csr(dd,
8616 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8617 if (reg)
8618 break;
8619 if (time_after(jiffies, timeout)) {
8620 dd_dev_err(dd,
8621 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8622 return -ETIMEDOUT;
8623 }
8624 udelay(2);
8625 }
8626
8627 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8628 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8629 }
8630
8631 if (!loopback) {
8632 /*
8633 * When doing quick linkup and not in loopback, both
8634 * sides must be done with LCB set-up before either
8635 * starts the quick linkup. Put a delay here so that
8636 * both sides can be started and have a chance to be
8637 * done with LCB set up before resuming.
8638 */
8639 dd_dev_err(dd,
8640 "Pausing for peer to be finished with LCB set up\n");
8641 msleep(5000);
8642 dd_dev_err(dd,
8643 "Continuing with quick linkup\n");
8644 }
8645
8646 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8647 set_8051_lcb_access(dd);
8648
8649 /*
8650 * State "quick" LinkUp request sets the physical link state to
8651 * LinkUp without a verify capability sequence.
8652 * This state is in simulator v37 and later.
8653 */
8654 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8655 if (ret != HCMD_SUCCESS) {
8656 dd_dev_err(dd,
8657 "%s: set physical link state to quick LinkUp failed with return %d\n",
8658 __func__, ret);
8659
8660 set_host_lcb_access(dd);
8661 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8662
8663 if (ret >= 0)
8664 ret = -EINVAL;
8665 return ret;
8666 }
8667
8668 return 0; /* success */
8669}
8670
8671/*
8672 * Set the SerDes to internal loopback mode.
8673 * Returns 0 on success, -errno on error.
8674 */
8675static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8676{
8677 int ret;
8678
8679 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8680 if (ret == HCMD_SUCCESS)
8681 return 0;
8682 dd_dev_err(dd,
8683 "Set physical link state to SerDes Loopback failed with return %d\n",
8684 ret);
8685 if (ret >= 0)
8686 ret = -EINVAL;
8687 return ret;
8688}
8689
8690/*
8691 * Do all special steps to set up loopback.
8692 */
8693static int init_loopback(struct hfi1_devdata *dd)
8694{
8695 dd_dev_info(dd, "Entering loopback mode\n");
8696
8697 /* all loopbacks should disable self GUID check */
8698 write_csr(dd, DC_DC8051_CFG_MODE,
8699 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8700
8701 /*
8702 * The simulator has only one loopback option - LCB. Switch
8703 * to that option, which includes quick link up.
8704 *
8705 * Accept all valid loopback values.
8706 */
8707 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8708 && (loopback == LOOPBACK_SERDES
8709 || loopback == LOOPBACK_LCB
8710 || loopback == LOOPBACK_CABLE)) {
8711 loopback = LOOPBACK_LCB;
8712 quick_linkup = 1;
8713 return 0;
8714 }
8715
8716 /* handle serdes loopback */
8717 if (loopback == LOOPBACK_SERDES) {
8718 /* internal serdes loopback needs quick linkup on RTL */
8719 if (dd->icode == ICODE_RTL_SILICON)
8720 quick_linkup = 1;
8721 return set_serdes_loopback_mode(dd);
8722 }
8723
8724 /* LCB loopback - handled at poll time */
8725 if (loopback == LOOPBACK_LCB) {
8726 quick_linkup = 1; /* LCB is always quick linkup */
8727
8728 /* not supported in emulation due to emulation RTL changes */
8729 if (dd->icode == ICODE_FPGA_EMULATION) {
8730 dd_dev_err(dd,
8731 "LCB loopback not supported in emulation\n");
8732 return -EINVAL;
8733 }
8734 return 0;
8735 }
8736
8737 /* external cable loopback requires no extra steps */
8738 if (loopback == LOOPBACK_CABLE)
8739 return 0;
8740
8741 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8742 return -EINVAL;
8743}
8744
8745/*
8746 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8747 * used in the Verify Capability link width attribute.
8748 */
8749static u16 opa_to_vc_link_widths(u16 opa_widths)
8750{
8751 int i;
8752 u16 result = 0;
8753
8754 static const struct link_bits {
8755 u16 from;
8756 u16 to;
8757 } opa_link_xlate[] = {
8758 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8759 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8760 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8761 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8762 };
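	/*
	 * Example: an FM width mask with both the 1X and 4X bits set
	 * translates to VC bits 0 and 3, i.e. a result of 0b1001.
	 */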
8763
8764 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8765 if (opa_widths & opa_link_xlate[i].from)
8766 result |= opa_link_xlate[i].to;
8767 }
8768 return result;
8769}
8770
8771/*
8772 * Set link attributes before moving to polling.
8773 */
8774static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8775{
8776 struct hfi1_devdata *dd = ppd->dd;
8777 u8 enable_lane_tx;
8778 u8 tx_polarity_inversion;
8779 u8 rx_polarity_inversion;
8780 int ret;
8781
8782 /* reset our fabric serdes to clear any lingering problems */
8783 fabric_serdes_reset(dd);
8784
8785 /* set the local tx rate - need to read-modify-write */
8786 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8787 &rx_polarity_inversion, &ppd->local_tx_rate);
8788 if (ret)
8789 goto set_local_link_attributes_fail;
8790
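	/*
	 * Note: pre-0.20 firmware takes a single rate select (1 = 25G,
	 * 0 = 12.5G); 0.20 and later takes a bit mask of enabled rates
	 * (bit 1 = 25G, bit 0 = 12.5G).
	 */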
8791 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8792 /* set the tx rate to the fastest enabled */
8793 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8794 ppd->local_tx_rate = 1;
8795 else
8796 ppd->local_tx_rate = 0;
8797 } else {
8798 /* set the tx rate to all enabled */
8799 ppd->local_tx_rate = 0;
8800 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8801 ppd->local_tx_rate |= 2;
8802 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8803 ppd->local_tx_rate |= 1;
8804 }
8805
8806 enable_lane_tx = 0xF; /* enable all four lanes */
8807 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8808 rx_polarity_inversion, ppd->local_tx_rate);
8809 if (ret != HCMD_SUCCESS)
8810 goto set_local_link_attributes_fail;
8811
8812 /*
8813 * DC supports continuous updates.
8814 */
8815 ret = write_vc_local_phy(dd, 0 /* no power management */,
8816 1 /* continuous updates */);
8817 if (ret != HCMD_SUCCESS)
8818 goto set_local_link_attributes_fail;
8819
8820 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8821 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8822 ppd->port_crc_mode_enabled);
8823 if (ret != HCMD_SUCCESS)
8824 goto set_local_link_attributes_fail;
8825
8826 ret = write_vc_local_link_width(dd, 0, 0,
8827 opa_to_vc_link_widths(ppd->link_width_enabled));
8828 if (ret != HCMD_SUCCESS)
8829 goto set_local_link_attributes_fail;
8830
8831 /* let peer know who we are */
8832 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8833 if (ret == HCMD_SUCCESS)
8834 return 0;
8835
8836set_local_link_attributes_fail:
8837 dd_dev_err(dd,
8838 "Failed to set local link attributes, return 0x%x\n",
8839 ret);
8840 return ret;
8841}
8842
8843/*
8844 * Call this to start the link. The caller should schedule a retry if the
8845 * cable is not present or if polling cannot be started. Do nothing if the
8846 * link is disabled. Returns 0 if the link is disabled or moved to polling.
8847 */
8848int start_link(struct hfi1_pportdata *ppd)
8849{
8850 if (!ppd->link_enabled) {
8851 dd_dev_info(ppd->dd,
8852 "%s: stopping link start because link is disabled\n",
8853 __func__);
8854 return 0;
8855 }
8856 if (!ppd->driver_link_ready) {
8857 dd_dev_info(ppd->dd,
8858 "%s: stopping link start because driver is not ready\n",
8859 __func__);
8860 return 0;
8861 }
8862
8863 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8864 loopback == LOOPBACK_LCB ||
8865 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8866 return set_link_state(ppd, HLS_DN_POLL);
8867
8868 dd_dev_info(ppd->dd,
8869 "%s: stopping link start because no cable is present\n",
8870 __func__);
8871 return -EAGAIN;
8872}
8873
8874static void reset_qsfp(struct hfi1_pportdata *ppd)
8875{
8876 struct hfi1_devdata *dd = ppd->dd;
8877 u64 mask, qsfp_mask;
8878
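	/*
	 * QSFP_HFI0_RESET_N is active low: enable the pin as an output,
	 * drive it low to assert reset, hold for ~10us, then drive it high
	 * again to release the module from reset.
	 */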
8879 mask = (u64)QSFP_HFI0_RESET_N;
8880 qsfp_mask = read_csr(dd,
8881 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
8882 qsfp_mask |= mask;
8883 write_csr(dd,
8884 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
8885 qsfp_mask);
8886
8887 qsfp_mask = read_csr(dd,
8888 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
8889 qsfp_mask &= ~mask;
8890 write_csr(dd,
8891 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8892 qsfp_mask);
8893
8894 udelay(10);
8895
8896 qsfp_mask |= mask;
8897 write_csr(dd,
8898 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
8899 qsfp_mask);
8900}
8901
8902static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8903 u8 *qsfp_interrupt_status)
8904{
8905 struct hfi1_devdata *dd = ppd->dd;
8906
8907 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8908 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8909 dd_dev_info(dd,
8910 "%s: QSFP cable on fire\n",
8911 __func__);
8912
8913 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8914 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8915 dd_dev_info(dd,
8916 "%s: QSFP cable temperature too low\n",
8917 __func__);
8918
8919 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8920 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8921 dd_dev_info(dd,
8922 "%s: QSFP supply voltage too high\n",
8923 __func__);
8924
8925 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8926 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8927 dd_dev_info(dd,
8928 "%s: QSFP supply voltage too low\n",
8929 __func__);
8930
8931 /* Byte 2 is vendor specific */
8932
8933 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
8934 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
8935 dd_dev_info(dd,
8936 "%s: Cable RX channel 1/2 power too high\n",
8937 __func__);
8938
8939 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
8940 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
8941 dd_dev_info(dd,
8942 "%s: Cable RX channel 1/2 power too low\n",
8943 __func__);
8944
8945 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
8946 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
8947 dd_dev_info(dd,
8948 "%s: Cable RX channel 3/4 power too high\n",
8949 __func__);
8950
8951 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
8952 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
8953 dd_dev_info(dd,
8954 "%s: Cable RX channel 3/4 power too low\n",
8955 __func__);
8956
8957 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
8958 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
8959 dd_dev_info(dd,
8960 "%s: Cable TX channel 1/2 bias too high\n",
8961 __func__);
8962
8963 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
8964 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
8965 dd_dev_info(dd,
8966 "%s: Cable TX channel 1/2 bias too low\n",
8967 __func__);
8968
8969 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
8970 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
8971 dd_dev_info(dd,
8972 "%s: Cable TX channel 3/4 bias too high\n",
8973 __func__);
8974
8975 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
8976 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
8977 dd_dev_info(dd,
8978 "%s: Cable TX channel 3/4 bias too low\n",
8979 __func__);
8980
8981 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
8982 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
8983 dd_dev_info(dd,
8984 "%s: Cable TX channel 1/2 power too high\n",
8985 __func__);
8986
8987 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
8988 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
8989 dd_dev_info(dd,
8990 "%s: Cable TX channel 1/2 power too low\n",
8991 __func__);
8992
8993 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
8994 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
8995 dd_dev_info(dd,
8996 "%s: Cable TX channel 3/4 power too high\n",
8997 __func__);
8998
8999 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9000 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9001 dd_dev_info(dd,
9002 "%s: Cable TX channel 3/4 power too low\n",
9003 __func__);
9004
9005 /* Bytes 9-10 and 11-12 are reserved */
9006 /* Bytes 13-15 are vendor specific */
9007
9008 return 0;
9009}
9010
9011static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
9012{
9013 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
9014
9015 return 0;
9016}
9017
9018static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
9019{
9020 struct hfi1_devdata *dd = ppd->dd;
9021 u8 qsfp_interrupt_status = 0;
9022
9023 if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
9024 != 1) {
9025 dd_dev_info(dd,
9026 "%s: Failed to read status of QSFP module\n",
9027 __func__);
9028 return -EIO;
9029 }
9030
9031 /* We don't care about alarms & warnings with a non-functional INT_N */
9032 if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
9033 do_pre_lni_host_behaviors(ppd);
9034
9035 return 0;
9036}
9037
9038/* This routine will only be scheduled if the QSFP module is present */
9039static void qsfp_event(struct work_struct *work)
9040{
9041 struct qsfp_data *qd;
9042 struct hfi1_pportdata *ppd;
9043 struct hfi1_devdata *dd;
9044
9045 qd = container_of(work, struct qsfp_data, qsfp_work);
9046 ppd = qd->ppd;
9047 dd = ppd->dd;
9048
9049 /* Sanity check */
9050 if (!qsfp_mod_present(ppd))
9051 return;
9052
9053 /*
9054 * Turn DC back on after the cable has been
9055 * re-inserted. Up until now, the DC has been in
9056 * reset to save power.
9057 */
9058 dc_start(dd);
9059
9060 if (qd->cache_refresh_required) {
9061 msleep(3000);
9062 reset_qsfp(ppd);
9063
9064 /* Check for QSFP interrupt after t_init (SFF 8679)
9065 * + extra
9066 */
9067 msleep(3000);
9068 if (!qd->qsfp_interrupt_functional) {
9069 if (do_qsfp_intr_fallback(ppd) < 0)
9070 dd_dev_info(dd, "%s: QSFP fallback failed\n",
9071 __func__);
9072 ppd->driver_link_ready = 1;
9073 start_link(ppd);
9074 }
9075 }
9076
9077 if (qd->check_interrupt_flags) {
9078 u8 qsfp_interrupt_status[16] = {0,};
9079
9080 if (qsfp_read(ppd, dd->hfi1_id, 6,
9081 &qsfp_interrupt_status[0], 16) != 16) {
9082 dd_dev_info(dd,
9083 "%s: Failed to read status of QSFP module\n",
9084 __func__);
9085 } else {
9086 unsigned long flags;
9087 u8 data_status;
9088
9089 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9090 ppd->qsfp_info.check_interrupt_flags = 0;
9091 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9092 flags);
9093
9094 if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
9095 != 1) {
9096 dd_dev_info(dd,
9097 "%s: Failed to read status of QSFP module\n",
9098 __func__);
9099 }
9100 if (!(data_status & QSFP_DATA_NOT_READY)) {
9101 do_pre_lni_host_behaviors(ppd);
9102 start_link(ppd);
9103 } else
9104 handle_qsfp_error_conditions(ppd,
9105 qsfp_interrupt_status);
9106 }
9107 }
9108}
9109
9110void init_qsfp(struct hfi1_pportdata *ppd)
9111{
9112 struct hfi1_devdata *dd = ppd->dd;
9113 u64 qsfp_mask;
9114
9115 if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9116 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9117 ppd->driver_link_ready = 1;
9118 return;
9119 }
9120
9121 ppd->qsfp_info.ppd = ppd;
9122 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
9123
9124 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9125 /* Clear current status to avoid spurious interrupts */
9126 write_csr(dd,
9127 dd->hfi1_id ?
9128 ASIC_QSFP2_CLEAR :
9129 ASIC_QSFP1_CLEAR,
9130 qsfp_mask);
9131
9132 /* Handle active low nature of INT_N and MODPRST_N pins */
9133 if (qsfp_mod_present(ppd))
9134 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9135 write_csr(dd,
9136 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9137 qsfp_mask);
9138
9139 /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
9140 qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
9141 write_csr(dd,
9142 dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9143 qsfp_mask);
9144
9145 if (qsfp_mod_present(ppd)) {
9146 msleep(3000);
9147 reset_qsfp(ppd);
9148
9149 /* Check for QSFP interrupt after t_init (SFF 8679)
9150 * + extra
9151 */
9152 msleep(3000);
9153 if (!ppd->qsfp_info.qsfp_interrupt_functional) {
9154 if (do_qsfp_intr_fallback(ppd) < 0)
9155 dd_dev_info(dd,
9156 "%s: QSFP fallback failed\n",
9157 __func__);
9158 ppd->driver_link_ready = 1;
9159 }
9160 }
9161}
9162
9163/*
9164 * Do a one-time initialize of the LCB block.
9165 */
9166static void init_lcb(struct hfi1_devdata *dd)
9167{
9168 /* the DC has been reset earlier in the driver load */
9169
9170 /* set LCB for cclk loopback on the port */
9171 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9172 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9173 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9174 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9175 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9176 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9177 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9178}
9179
9180int bringup_serdes(struct hfi1_pportdata *ppd)
9181{
9182 struct hfi1_devdata *dd = ppd->dd;
9183 u64 guid;
9184 int ret;
9185
9186 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9187 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9188
9189 guid = ppd->guid;
9190 if (!guid) {
9191 if (dd->base_guid)
9192 guid = dd->base_guid + ppd->port - 1;
9193 ppd->guid = guid;
9194 }
9195
9196 /* the link defaults to enabled */
9197 ppd->link_enabled = 1;
9198 /* Set linkinit_reason on power up per OPA spec */
9199 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9200
9201 /* one-time init of the LCB */
9202 init_lcb(dd);
9203
9204 if (loopback) {
9205 ret = init_loopback(dd);
9206 if (ret < 0)
9207 return ret;
9208 }
9209
9210 return start_link(ppd);
9211}
9212
9213void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9214{
9215 struct hfi1_devdata *dd = ppd->dd;
9216
9217 /*
9218 * Shut down the link and keep it down. First clear the flag that says
9219 * the driver wants to allow the link to be up (driver_link_ready).
9220 * Then make sure the link is not automatically restarted
9221 * (link_enabled). Cancel any pending restart. And finally
9222 * go offline.
9223 */
9224 ppd->driver_link_ready = 0;
9225 ppd->link_enabled = 0;
9226
9227 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9228 OPA_LINKDOWN_REASON_SMA_DISABLED);
9229 set_link_state(ppd, HLS_DN_OFFLINE);
9230
9231 /* disable the port */
9232 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9233}
9234
9235static inline int init_cpu_counters(struct hfi1_devdata *dd)
9236{
9237 struct hfi1_pportdata *ppd;
9238 int i;
9239
9240 ppd = (struct hfi1_pportdata *)(dd + 1);
9241 for (i = 0; i < dd->num_pports; i++, ppd++) {
9242 ppd->ibport_data.rc_acks = NULL;
9243 ppd->ibport_data.rc_qacks = NULL;
9244 ppd->ibport_data.rc_acks = alloc_percpu(u64);
9245 ppd->ibport_data.rc_qacks = alloc_percpu(u64);
9246 ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
9247 if ((ppd->ibport_data.rc_acks == NULL) ||
9248 (ppd->ibport_data.rc_delayed_comp == NULL) ||
9249 (ppd->ibport_data.rc_qacks == NULL))
9250 return -ENOMEM;
9251 }
9252
9253 return 0;
9254}
9255
9256static const char * const pt_names[] = {
9257 "expected",
9258 "eager",
9259 "invalid"
9260};
9261
9262static const char *pt_name(u32 type)
9263{
9264 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9265}
9266
9267/*
9268 * index is the index into the receive array
9269 */
9270void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9271 u32 type, unsigned long pa, u16 order)
9272{
9273 u64 reg;
9274 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9275 (dd->kregbase + RCV_ARRAY));
9276
9277 if (!(dd->flags & HFI1_PRESENT))
9278 goto done;
9279
9280 if (type == PT_INVALID) {
9281 pa = 0;
9282 } else if (type > PT_INVALID) {
9283 dd_dev_err(dd,
9284 "unexpected receive array type %u for index %u, not handled\n",
9285 type, index);
9286 goto done;
9287 }
9288
9289 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9290 pt_name(type), index, pa, (unsigned long)order);
9291
9292#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
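	/* pack write-enable, the buffer size order, and the 4KB-aligned
	 * physical address into a single receive array entry
	 */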
9293 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9294 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9295 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9296 << RCV_ARRAY_RT_ADDR_SHIFT;
9297 writeq(reg, base + (index * 8));
9298
9299 if (type == PT_EAGER)
9300 /*
9301 * Eager entries are written one-by-one so we have to push them
9302 * after we write the entry.
9303 */
9304 flush_wc();
9305done:
9306 return;
9307}
9308
9309void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9310{
9311 struct hfi1_devdata *dd = rcd->dd;
9312 u32 i;
9313
9314 /* this could be optimized */
9315 for (i = rcd->eager_base; i < rcd->eager_base +
9316 rcd->egrbufs.alloced; i++)
9317 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9318
9319 for (i = rcd->expected_base;
9320 i < rcd->expected_base + rcd->expected_count; i++)
9321 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9322}
9323
9324int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9325 struct hfi1_ctxt_info *kinfo)
9326{
9327 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9328 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9329 return 0;
9330}
9331
9332struct hfi1_message_header *hfi1_get_msgheader(
9333 struct hfi1_devdata *dd, __le32 *rhf_addr)
9334{
9335 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9336
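	/*
	 * rhf_addr points at the RHF within the header queue entry; back up
	 * by the fixed RHF offset and add the header offset reported in the
	 * RHF to reach the start of the message header.
	 */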
9337 return (struct hfi1_message_header *)
9338 (rhf_addr - dd->rhf_offset + offset);
9339}
9340
9341static const char * const ib_cfg_name_strings[] = {
9342 "HFI1_IB_CFG_LIDLMC",
9343 "HFI1_IB_CFG_LWID_DG_ENB",
9344 "HFI1_IB_CFG_LWID_ENB",
9345 "HFI1_IB_CFG_LWID",
9346 "HFI1_IB_CFG_SPD_ENB",
9347 "HFI1_IB_CFG_SPD",
9348 "HFI1_IB_CFG_RXPOL_ENB",
9349 "HFI1_IB_CFG_LREV_ENB",
9350 "HFI1_IB_CFG_LINKLATENCY",
9351 "HFI1_IB_CFG_HRTBT",
9352 "HFI1_IB_CFG_OP_VLS",
9353 "HFI1_IB_CFG_VL_HIGH_CAP",
9354 "HFI1_IB_CFG_VL_LOW_CAP",
9355 "HFI1_IB_CFG_OVERRUN_THRESH",
9356 "HFI1_IB_CFG_PHYERR_THRESH",
9357 "HFI1_IB_CFG_LINKDEFAULT",
9358 "HFI1_IB_CFG_PKEYS",
9359 "HFI1_IB_CFG_MTU",
9360 "HFI1_IB_CFG_LSTATE",
9361 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9362 "HFI1_IB_CFG_PMA_TICKS",
9363 "HFI1_IB_CFG_PORT"
9364};
9365
9366static const char *ib_cfg_name(int which)
9367{
9368 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9369 return "invalid";
9370 return ib_cfg_name_strings[which];
9371}
9372
9373int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9374{
9375 struct hfi1_devdata *dd = ppd->dd;
9376 int val = 0;
9377
9378 switch (which) {
9379 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9380 val = ppd->link_width_enabled;
9381 break;
9382 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9383 val = ppd->link_width_active;
9384 break;
9385 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9386 val = ppd->link_speed_enabled;
9387 break;
9388 case HFI1_IB_CFG_SPD: /* current Link speed */
9389 val = ppd->link_speed_active;
9390 break;
9391
9392 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9393 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9394 case HFI1_IB_CFG_LINKLATENCY:
9395 goto unimplemented;
9396
9397 case HFI1_IB_CFG_OP_VLS:
9398 val = ppd->vls_operational;
9399 break;
9400 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9401 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9402 break;
9403 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9404 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9405 break;
9406 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9407 val = ppd->overrun_threshold;
9408 break;
9409 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9410 val = ppd->phy_error_threshold;
9411 break;
9412 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9413 val = dd->link_default;
9414 break;
9415
9416 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9417 case HFI1_IB_CFG_PMA_TICKS:
9418 default:
9419unimplemented:
9420 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9421 dd_dev_info(
9422 dd,
9423 "%s: which %s: not implemented\n",
9424 __func__,
9425 ib_cfg_name(which));
9426 break;
9427 }
9428
9429 return val;
9430}
9431
9432/*
9433 * The largest MAD packet size.
9434 */
9435#define MAX_MAD_PACKET 2048
9436
9437/*
9438 * Return the maximum header bytes that can go on the _wire_
9439 * for this device. This count includes the ICRC which is
9440 * not part of the packet held in memory but it is appended
9441 * by the HW.
9442 * This is dependent on the device's receive header entry size.
9443 * HFI allows this to be set per-receive context, but the
9444 * driver presently enforces a global value.
9445 */
9446u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9447{
9448 /*
9449 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9450 * the Receive Header Entry Size minus the PBC (or RHF) size
9451 * plus one DW for the ICRC appended by HW.
9452 *
9453 * dd->rcd[0].rcvhdrqentsize is in DW.
9454 * We use rcd[0] as all context will have the same value. Also,
9455 * the first kernel context would have been allocated by now so
9456 * we are guaranteed a valid value.
9457 */
9458 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9459}
9460
9461/*
9462 * Set Send Length
9463 * @ppd - per port data
9464 *
9465 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9466 * registers compare against LRH.PktLen, so use the max bytes included
9467 * in the LRH.
9468 *
9469 * This routine changes all VL values except VL15, which it maintains at
9470 * the same value.
9471 */
9472static void set_send_length(struct hfi1_pportdata *ppd)
9473{
9474 struct hfi1_devdata *dd = ppd->dd;
9475 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9476 u32 maxvlmtu = dd->vld[15].mtu;
9477 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9478 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9479 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9480 int i;
9481
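	/*
	 * Pack (MTU + max header) in DWs for each VL: VL0-3 go into
	 * SEND_LEN_CHECK0 and VL4-7 into SEND_LEN_CHECK1 (the VL15 field
	 * was already set in len2 above).
	 */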
9482 for (i = 0; i < ppd->vls_supported; i++) {
9483 if (dd->vld[i].mtu > maxvlmtu)
9484 maxvlmtu = dd->vld[i].mtu;
9485 if (i <= 3)
9486 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9487 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9488 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9489 else
9490 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9491 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9492 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9493 }
9494 write_csr(dd, SEND_LEN_CHECK0, len1);
9495 write_csr(dd, SEND_LEN_CHECK1, len2);
9496 /* adjust kernel credit return thresholds based on new MTUs */
9497 /* all kernel receive contexts have the same hdrqentsize */
9498 for (i = 0; i < ppd->vls_supported; i++) {
9499 sc_set_cr_threshold(dd->vld[i].sc,
9500 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9501 dd->rcd[0]->rcvhdrqentsize));
9502 }
9503 sc_set_cr_threshold(dd->vld[15].sc,
9504 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9505 dd->rcd[0]->rcvhdrqentsize));
9506
9507 /* Adjust maximum MTU for the port in DC */
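	/* ilog2(maxvlmtu >> 8) + 1 yields e.g. 4 for a 2048 MTU and 6 for
	 * 8192; the 10KB MTU has its own cap encoding
	 */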
9508 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9509 (ilog2(maxvlmtu >> 8) + 1);
9510 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9511 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9512 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9513 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9514 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9515}
9516
9517static void set_lidlmc(struct hfi1_pportdata *ppd)
9518{
9519 int i;
9520 u64 sreg = 0;
9521 struct hfi1_devdata *dd = ppd->dd;
9522 u32 mask = ~((1U << ppd->lmc) - 1);
9523 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9524
9525 if (dd->hfi1_snoop.mode_flag)
9526 dd_dev_info(dd, "Set lid/lmc while snooping");
9527
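	/*
	 * mask clears the low LMC bits of the LID: e.g. an lmc of 2 gives
	 * 0xfffffffc, so the DLID/SLID checks ignore the low two bits.
	 */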
9528 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9529 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9530 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9531 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9532 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9533 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9534 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9535
9536 /*
9537 * Iterate over all the send contexts and set their SLID check
9538 */
9539 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9540 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9541 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9542 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9543
9544 for (i = 0; i < dd->chip_send_contexts; i++) {
9545 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9546 i, (u32)sreg);
9547 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9548 }
9549
9550 /* Now we have to do the same thing for the sdma engines */
9551 sdma_update_lmc(dd, mask, ppd->lid);
9552}
9553
9554static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9555{
9556 unsigned long timeout;
9557 u32 curr_state;
9558
9559 timeout = jiffies + msecs_to_jiffies(msecs);
9560 while (1) {
9561 curr_state = read_physical_state(dd);
9562 if (curr_state == state)
9563 break;
9564 if (time_after(jiffies, timeout)) {
9565 dd_dev_err(dd,
9566 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9567 state, curr_state);
9568 return -ETIMEDOUT;
9569 }
9570 usleep_range(1950, 2050); /* sleep 2ms-ish */
9571 }
9572
9573 return 0;
9574}
9575
9576/*
9577 * Helper for set_link_state(). Do not call except from that routine.
9578 * Expects ppd->hls_mutex to be held.
9579 *
9580 * @rem_reason value to be sent to the neighbor
9581 *
9582 * LinkDownReasons only set if transition succeeds.
9583 */
9584static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9585{
9586 struct hfi1_devdata *dd = ppd->dd;
9587 u32 pstate, previous_state;
9588 u32 last_local_state;
9589 u32 last_remote_state;
9590 int ret;
9591 int do_transition;
9592 int do_wait;
9593
9594 previous_state = ppd->host_link_state;
9595 ppd->host_link_state = HLS_GOING_OFFLINE;
9596 pstate = read_physical_state(dd);
9597 if (pstate == PLS_OFFLINE) {
9598 do_transition = 0; /* in right state */
9599 do_wait = 0; /* ...no need to wait */
9600 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9601 do_transition = 0; /* in an offline transient state */
9602 do_wait = 1; /* ...wait for it to settle */
9603 } else {
9604 do_transition = 1; /* need to move to offline */
9605 do_wait = 1; /* ...will need to wait */
9606 }
9607
9608 if (do_transition) {
9609 ret = set_physical_link_state(dd,
9610 PLS_OFFLINE | (rem_reason << 8));
9611
9612 if (ret != HCMD_SUCCESS) {
9613 dd_dev_err(dd,
9614 "Failed to transition to Offline link state, return %d\n",
9615 ret);
9616 return -EINVAL;
9617 }
9618 if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
9619 ppd->offline_disabled_reason =
9620 OPA_LINKDOWN_REASON_TRANSIENT;
9621 }
9622
9623 if (do_wait) {
9624 /* it can take a while for the link to go down */
9625 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9626 if (ret < 0)
9627 return ret;
9628 }
9629
9630 /* make sure the logical state is also down */
9631 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9632
9633 /*
9634 * Now in charge of LCB - must be after the physical state is
9635 * offline.quiet and before host_link_state is changed.
9636 */
9637 set_host_lcb_access(dd);
9638 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9639 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9640
9641 /*
9642 * The LNI has a mandatory wait time after the physical state
9643 * moves to Offline.Quiet. The wait time may be different
9644 * depending on how the link went down. The 8051 firmware
9645 * will observe the needed wait time and only move to ready
9646 * when that is completed. The largest of the quiet timeouts
9647 * is 6s, so wait that long and then at least 0.5s more for
9648 * other transitions, and another 0.5s for a buffer.
9649 */
9650 ret = wait_fm_ready(dd, 7000);
9651 if (ret) {
9652 dd_dev_err(dd,
9653 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9654 /* state is really offline, so make it so */
9655 ppd->host_link_state = HLS_DN_OFFLINE;
9656 return ret;
9657 }
9658
9659 /*
9660 * The state is now offline and the 8051 is ready to accept host
9661 * requests.
9662 * - change our state
9663 * - notify others if we were previously in a linkup state
9664 */
9665 ppd->host_link_state = HLS_DN_OFFLINE;
9666 if (previous_state & HLS_UP) {
9667 /* went down while link was up */
9668 handle_linkup_change(dd, 0);
9669 } else if (previous_state
9670 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9671 /* went down while attempting link up */
9672 /* byte 1 of last_*_state is the failure reason */
9673 read_last_local_state(dd, &last_local_state);
9674 read_last_remote_state(dd, &last_remote_state);
9675 dd_dev_err(dd,
9676 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9677 last_local_state, last_remote_state);
9678 }
9679
9680 /* the active link width (downgrade) is 0 on link down */
9681 ppd->link_width_active = 0;
9682 ppd->link_width_downgrade_tx_active = 0;
9683 ppd->link_width_downgrade_rx_active = 0;
9684 ppd->current_egress_rate = 0;
9685 return 0;
9686}
9687
9688/* return the link state name */
9689static const char *link_state_name(u32 state)
9690{
9691 const char *name;
9692 int n = ilog2(state);
9693 static const char * const names[] = {
9694 [__HLS_UP_INIT_BP] = "INIT",
9695 [__HLS_UP_ARMED_BP] = "ARMED",
9696 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9697 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9698 [__HLS_DN_POLL_BP] = "POLL",
9699 [__HLS_DN_DISABLE_BP] = "DISABLE",
9700 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9701 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9702 [__HLS_GOING_UP_BP] = "GOING_UP",
9703 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9704 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9705 };
9706
9707 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9708 return name ? name : "unknown";
9709}
9710
9711/* return the link state reason name */
9712static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9713{
9714 if (state == HLS_UP_INIT) {
9715 switch (ppd->linkinit_reason) {
9716 case OPA_LINKINIT_REASON_LINKUP:
9717 return "(LINKUP)";
9718 case OPA_LINKINIT_REASON_FLAPPING:
9719 return "(FLAPPING)";
9720 case OPA_LINKINIT_OUTSIDE_POLICY:
9721 return "(OUTSIDE_POLICY)";
9722 case OPA_LINKINIT_QUARANTINED:
9723 return "(QUARANTINED)";
9724 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9725 return "(INSUFIC_CAPABILITY)";
9726 default:
9727 break;
9728 }
9729 }
9730 return "";
9731}
9732
9733/*
9734 * driver_physical_state - convert the driver's notion of a port's
9735 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9736 * Return -1 (converted to a u32) to indicate error.
9737 */
9738u32 driver_physical_state(struct hfi1_pportdata *ppd)
9739{
9740 switch (ppd->host_link_state) {
9741 case HLS_UP_INIT:
9742 case HLS_UP_ARMED:
9743 case HLS_UP_ACTIVE:
9744 return IB_PORTPHYSSTATE_LINKUP;
9745 case HLS_DN_POLL:
9746 return IB_PORTPHYSSTATE_POLLING;
9747 case HLS_DN_DISABLE:
9748 return IB_PORTPHYSSTATE_DISABLED;
9749 case HLS_DN_OFFLINE:
9750 return OPA_PORTPHYSSTATE_OFFLINE;
9751 case HLS_VERIFY_CAP:
9752 return IB_PORTPHYSSTATE_POLLING;
9753 case HLS_GOING_UP:
9754 return IB_PORTPHYSSTATE_POLLING;
9755 case HLS_GOING_OFFLINE:
9756 return OPA_PORTPHYSSTATE_OFFLINE;
9757 case HLS_LINK_COOLDOWN:
9758 return OPA_PORTPHYSSTATE_OFFLINE;
9759 case HLS_DN_DOWNDEF:
9760 default:
9761 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9762 ppd->host_link_state);
9763 return -1;
9764 }
9765}
9766
9767/*
9768 * driver_logical_state - convert the driver's notion of a port's
9769 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9770 * (converted to a u32) to indicate error.
9771 */
9772u32 driver_logical_state(struct hfi1_pportdata *ppd)
9773{
9774 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9775 return IB_PORT_DOWN;
9776
9777 switch (ppd->host_link_state & HLS_UP) {
9778 case HLS_UP_INIT:
9779 return IB_PORT_INIT;
9780 case HLS_UP_ARMED:
9781 return IB_PORT_ARMED;
9782 case HLS_UP_ACTIVE:
9783 return IB_PORT_ACTIVE;
9784 default:
9785 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9786 ppd->host_link_state);
9787 return -1;
9788 }
9789}
9790
9791void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9792 u8 neigh_reason, u8 rem_reason)
9793{
9794 if (ppd->local_link_down_reason.latest == 0 &&
9795 ppd->neigh_link_down_reason.latest == 0) {
9796 ppd->local_link_down_reason.latest = lcl_reason;
9797 ppd->neigh_link_down_reason.latest = neigh_reason;
9798 ppd->remote_link_down_reason = rem_reason;
9799 }
9800}
9801
9802/*
9803 * Change the physical and/or logical link state.
9804 *
9805 * Do not call this routine while inside an interrupt. It contains
9806 * calls to routines that can take multiple seconds to finish.
9807 *
9808 * Returns 0 on success, -errno on failure.
9809 */
9810int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9811{
9812 struct hfi1_devdata *dd = ppd->dd;
9813 struct ib_event event = {.device = NULL};
9814 int ret1, ret = 0;
9815 int was_up, is_down;
9816 int orig_new_state, poll_bounce;
9817
9818 mutex_lock(&ppd->hls_lock);
9819
9820 orig_new_state = state;
9821 if (state == HLS_DN_DOWNDEF)
9822 state = dd->link_default;
9823
9824 /* interpret poll -> poll as a link bounce */
9825 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9826 && state == HLS_DN_POLL;
9827
9828 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9829 link_state_name(ppd->host_link_state),
9830 link_state_name(orig_new_state),
9831 poll_bounce ? "(bounce) " : "",
9832 link_state_reason_name(ppd, state));
9833
9834 was_up = !!(ppd->host_link_state & HLS_UP);
9835
9836 /*
9837 * If we're going to a (HLS_*) link state that implies the logical
9838 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9839 * reset is_sm_config_started to 0.
9840 */
9841 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9842 ppd->is_sm_config_started = 0;
9843
9844 /*
9845 * Do nothing if the states match. Let a poll to poll link bounce
9846 * go through.
9847 */
9848 if (ppd->host_link_state == state && !poll_bounce)
9849 goto done;
9850
9851 switch (state) {
9852 case HLS_UP_INIT:
9853 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9854 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9855 /*
9856 * Quick link up jumps from polling to here.
9857 *
9858 * Whether in normal or loopback mode, the
9859 * simulator jumps from polling to link up.
9860 * Accept that here.
9861 */
9862 /* OK */;
9863 } else if (ppd->host_link_state != HLS_GOING_UP) {
9864 goto unexpected;
9865 }
9866
9867 ppd->host_link_state = HLS_UP_INIT;
9868 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9869 if (ret) {
9870 /* logical state didn't change, stay at going_up */
9871 ppd->host_link_state = HLS_GOING_UP;
9872 dd_dev_err(dd,
9873 "%s: logical state did not change to INIT\n",
9874 __func__);
9875 } else {
9876 /* clear old transient LINKINIT_REASON code */
9877 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9878 ppd->linkinit_reason =
9879 OPA_LINKINIT_REASON_LINKUP;
9880
9881 /* enable the port */
9882 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9883
9884 handle_linkup_change(dd, 1);
9885 }
9886 break;
9887 case HLS_UP_ARMED:
9888 if (ppd->host_link_state != HLS_UP_INIT)
9889 goto unexpected;
9890
9891 ppd->host_link_state = HLS_UP_ARMED;
9892 set_logical_state(dd, LSTATE_ARMED);
9893 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9894 if (ret) {
9895 /* logical state didn't change, stay at init */
9896 ppd->host_link_state = HLS_UP_INIT;
9897 dd_dev_err(dd,
9898 "%s: logical state did not change to ARMED\n",
9899 __func__);
9900 }
9901 /*
9902 * The simulator does not currently implement SMA messages,
9903 * so neighbor_normal is not set. Set it here when we first
9904 * move to Armed.
9905 */
9906 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9907 ppd->neighbor_normal = 1;
9908 break;
9909 case HLS_UP_ACTIVE:
9910 if (ppd->host_link_state != HLS_UP_ARMED)
9911 goto unexpected;
9912
9913 ppd->host_link_state = HLS_UP_ACTIVE;
9914 set_logical_state(dd, LSTATE_ACTIVE);
9915 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9916 if (ret) {
9917 /* logical state didn't change, stay at armed */
9918 ppd->host_link_state = HLS_UP_ARMED;
9919 dd_dev_err(dd,
9920 "%s: logical state did not change to ACTIVE\n",
9921 __func__);
9922 } else {
9923
9924 /* tell all engines to go running */
9925 sdma_all_running(dd);
9926
9927 /* Signal the IB layer that the port has gone active */
9928 event.device = &dd->verbs_dev.ibdev;
9929 event.element.port_num = ppd->port;
9930 event.event = IB_EVENT_PORT_ACTIVE;
9931 }
9932 break;
9933 case HLS_DN_POLL:
9934 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9935 ppd->host_link_state == HLS_DN_OFFLINE) &&
9936 dd->dc_shutdown)
9937 dc_start(dd);
9938 /* Hand LED control to the DC */
9939 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9940
9941 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9942 u8 tmp = ppd->link_enabled;
9943
9944 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9945 if (ret) {
9946 ppd->link_enabled = tmp;
9947 break;
9948 }
9949 ppd->remote_link_down_reason = 0;
9950
9951 if (ppd->driver_link_ready)
9952 ppd->link_enabled = 1;
9953 }
9954
9955 set_all_slowpath(ppd->dd);
9956 ret = set_local_link_attributes(ppd);
9957 if (ret)
9958 break;
9959
9960 ppd->port_error_action = 0;
9961 ppd->host_link_state = HLS_DN_POLL;
9962
9963 if (quick_linkup) {
9964 /* quick linkup does not go into polling */
9965 ret = do_quick_linkup(dd);
9966 } else {
9967 ret1 = set_physical_link_state(dd, PLS_POLLING);
9968 if (ret1 != HCMD_SUCCESS) {
9969 dd_dev_err(dd,
9970 "Failed to transition to Polling link state, return 0x%x\n",
9971 ret1);
9972 ret = -EINVAL;
9973 }
9974 }
9975 ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
9976 /*
9977 * If an error occurred above, go back to offline. The
9978 * caller may reschedule another attempt.
9979 */
9980 if (ret)
9981 goto_offline(ppd, 0);
9982 break;
9983 case HLS_DN_DISABLE:
9984 /* link is disabled */
9985 ppd->link_enabled = 0;
9986
9987 /* allow any state to transition to disabled */
9988
9989 /* must transition to offline first */
9990 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9991 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9992 if (ret)
9993 break;
9994 ppd->remote_link_down_reason = 0;
9995 }
9996
9997 ret1 = set_physical_link_state(dd, PLS_DISABLED);
9998 if (ret1 != HCMD_SUCCESS) {
9999 dd_dev_err(dd,
10000 "Failed to transition to Disabled link state, return 0x%x\n",
10001 ret1);
10002 ret = -EINVAL;
10003 break;
10004 }
10005 ppd->host_link_state = HLS_DN_DISABLE;
10006 dc_shutdown(dd);
10007 break;
10008 case HLS_DN_OFFLINE:
10009 if (ppd->host_link_state == HLS_DN_DISABLE)
10010 dc_start(dd);
10011
10012 /* allow any state to transition to offline */
10013 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10014 if (!ret)
10015 ppd->remote_link_down_reason = 0;
10016 break;
10017 case HLS_VERIFY_CAP:
10018 if (ppd->host_link_state != HLS_DN_POLL)
10019 goto unexpected;
10020 ppd->host_link_state = HLS_VERIFY_CAP;
10021 break;
10022 case HLS_GOING_UP:
10023 if (ppd->host_link_state != HLS_VERIFY_CAP)
10024 goto unexpected;
10025
10026 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10027 if (ret1 != HCMD_SUCCESS) {
10028 dd_dev_err(dd,
10029 "Failed to transition to link up state, return 0x%x\n",
10030 ret1);
10031 ret = -EINVAL;
10032 break;
10033 }
10034 ppd->host_link_state = HLS_GOING_UP;
10035 break;
10036
10037 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10038 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10039 default:
10040 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10041 __func__, state);
10042 ret = -EINVAL;
10043 break;
10044 }
10045
10046 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10047 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10048
10049 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10050 ppd->neigh_link_down_reason.sma == 0) {
10051 ppd->local_link_down_reason.sma =
10052 ppd->local_link_down_reason.latest;
10053 ppd->neigh_link_down_reason.sma =
10054 ppd->neigh_link_down_reason.latest;
10055 }
10056
10057 goto done;
10058
10059unexpected:
10060 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10061 __func__, link_state_name(ppd->host_link_state),
10062 link_state_name(state));
10063 ret = -EINVAL;
10064
10065done:
10066 mutex_unlock(&ppd->hls_lock);
10067
10068 if (event.device)
10069 ib_dispatch_event(&event);
10070
10071 return ret;
10072}
10073
10074int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10075{
10076 u64 reg;
10077 int ret = 0;
10078
10079 switch (which) {
10080 case HFI1_IB_CFG_LIDLMC:
10081 set_lidlmc(ppd);
10082 break;
10083 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10084 /*
10085 * The VL Arbitrator high limit is sent in units of 4k
10086 * bytes, while HFI stores it in units of 64 bytes.
10087 */
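		/*
		 * Worked example (illustrative values): an incoming limit of
		 * 2 means 2 * 4096 = 8192 bytes; the CSR stores it as
		 * 8192 / 64 = 128, i.e. val * (4096 / 64) = val * 64.
		 */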
10088 val *= 4096/64;
10089 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10090 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10091 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10092 break;
10093 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10094 /* HFI only supports POLL as the default link down state */
10095 if (val != HLS_DN_POLL)
10096 ret = -EINVAL;
10097 break;
10098 case HFI1_IB_CFG_OP_VLS:
10099 if (ppd->vls_operational != val) {
10100 ppd->vls_operational = val;
10101 if (!ppd->port)
10102 ret = -EINVAL;
10103 else
10104 ret = sdma_map_init(
10105 ppd->dd,
10106 ppd->port - 1,
10107 val,
10108 NULL);
10109 }
10110 break;
10111 /*
10112 * For link width, link width downgrade, and speed enable, always AND
10113 * the setting with what is actually supported. This has two benefits.
10114 * First, enabled can't have unsupported values, no matter what the
10115 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10116 * "fill in with your supported value" have all the bits in the
10117 * field set, so simply ANDing with supported has the desired result.
10118 */
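	/*
	 * Sketch of the wildcard case (values are illustrative only): if
	 * link_width_supported were 0x3 and the SM sent an all-ones
	 * wildcard, 0xffff & 0x3 = 0x3, i.e. "everything we support".
	 */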
10119 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10120 ppd->link_width_enabled = val & ppd->link_width_supported;
10121 break;
10122 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10123 ppd->link_width_downgrade_enabled =
10124 val & ppd->link_width_downgrade_supported;
10125 break;
10126 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10127 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10128 break;
10129 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10130 /*
10131 * HFI does not follow IB specs, save this value
10132 * so we can report it, if asked.
10133 */
10134 ppd->overrun_threshold = val;
10135 break;
10136 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10137 /*
10138 * HFI does not follow IB specs, save this value
10139 * so we can report it, if asked.
10140 */
10141 ppd->phy_error_threshold = val;
10142 break;
10143
10144 case HFI1_IB_CFG_MTU:
10145 set_send_length(ppd);
10146 break;
10147
10148 case HFI1_IB_CFG_PKEYS:
10149 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10150 set_partition_keys(ppd);
10151 break;
10152
10153 default:
10154 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10155 dd_dev_info(ppd->dd,
10156 "%s: which %s, val 0x%x: not implemented\n",
10157 __func__, ib_cfg_name(which), val);
10158 break;
10159 }
10160 return ret;
10161}
10162
10163/* begin functions related to vl arbitration table caching */
10164static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10165{
10166 int i;
10167
10168 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10169 VL_ARB_LOW_PRIO_TABLE_SIZE);
10170 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10171 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10172
10173 /*
10174 * Note that we always return values directly from the
10175 * 'vl_arb_cache' (and do no CSR reads) in response to a
10176 * 'Get(VLArbTable)'. This is obviously correct after a
10177 * 'Set(VLArbTable)', since the cache will then be up to
10178 * date. But it's also correct prior to any 'Set(VLArbTable)'
10179 * since then both the cache, and the relevant h/w registers
10180 * will be zeroed.
10181 */
10182
10183 for (i = 0; i < MAX_PRIO_TABLE; i++)
10184 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10185}
10186
10187/*
10188 * vl_arb_lock_cache
10189 *
10190 * All other vl_arb_* functions should be called only after locking
10191 * the cache.
10192 */
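/*
 * Typical calling pattern (a sketch mirroring fm_get_table() below):
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */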
10193static inline struct vl_arb_cache *
10194vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10195{
10196 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10197 return NULL;
10198 spin_lock(&ppd->vl_arb_cache[idx].lock);
10199 return &ppd->vl_arb_cache[idx];
10200}
10201
10202static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10203{
10204 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10205}
10206
10207static void vl_arb_get_cache(struct vl_arb_cache *cache,
10208 struct ib_vl_weight_elem *vl)
10209{
10210 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10211}
10212
10213static void vl_arb_set_cache(struct vl_arb_cache *cache,
10214 struct ib_vl_weight_elem *vl)
10215{
10216 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10217}
10218
10219static int vl_arb_match_cache(struct vl_arb_cache *cache,
10220 struct ib_vl_weight_elem *vl)
10221{
10222 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10223}
10224/* end functions related to vl arbitration table caching */
10225
10226static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10227 u32 size, struct ib_vl_weight_elem *vl)
10228{
10229 struct hfi1_devdata *dd = ppd->dd;
10230 u64 reg;
10231 unsigned int i, is_up = 0;
10232 int drain, ret = 0;
10233
10234 mutex_lock(&ppd->hls_lock);
10235
10236 if (ppd->host_link_state & HLS_UP)
10237 is_up = 1;
10238
10239 drain = !is_ax(dd) && is_up;
10240
10241 if (drain)
10242 /*
10243 * Before adjusting VL arbitration weights, empty per-VL
10244 * FIFOs, otherwise a packet whose VL weight is being
10245 * set to 0 could get stuck in a FIFO with no chance to
10246 * egress.
10247 */
10248 ret = stop_drain_data_vls(dd);
10249
10250 if (ret) {
10251 dd_dev_err(
10252 dd,
10253 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10254 __func__);
10255 goto err;
10256 }
10257
10258 for (i = 0; i < size; i++, vl++) {
10259 /*
10260 * NOTE: The low priority shift and mask are used here, but
10261 * they are the same for both the low and high registers.
10262 */
10263 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10264 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10265 | (((u64)vl->weight
10266 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10267 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10268 write_csr(dd, target + (i * 8), reg);
10269 }
10270 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10271
10272 if (drain)
10273 open_fill_data_vls(dd); /* reopen all VLs */
10274
10275err:
10276 mutex_unlock(&ppd->hls_lock);
10277
10278 return ret;
10279}
10280
10281/*
10282 * Read one credit merge VL register.
10283 */
10284static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10285 struct vl_limit *vll)
10286{
10287 u64 reg = read_csr(dd, csr);
10288
10289 vll->dedicated = cpu_to_be16(
10290 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10291 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10292 vll->shared = cpu_to_be16(
10293 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10294 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10295}
10296
10297/*
10298 * Read the current credit merge limits.
10299 */
10300static int get_buffer_control(struct hfi1_devdata *dd,
10301 struct buffer_control *bc, u16 *overall_limit)
10302{
10303 u64 reg;
10304 int i;
10305
10306 /* not all entries are filled in */
10307 memset(bc, 0, sizeof(*bc));
10308
10309 /* OPA and HFI have a 1-1 mapping */
10310 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10311 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10312
10313 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10314 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10315
10316 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10317 bc->overall_shared_limit = cpu_to_be16(
10318 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10319 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10320 if (overall_limit)
10321 *overall_limit = (reg
10322 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10323 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10324 return sizeof(struct buffer_control);
10325}
10326
10327static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10328{
10329 u64 reg;
10330 int i;
10331
10332 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10333 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10334 for (i = 0; i < sizeof(u64); i++) {
10335 u8 byte = *(((u8 *)&reg) + i);
10336
10337 dp->vlnt[2 * i] = byte & 0xf;
10338 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10339 }
10340
10341 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10342 for (i = 0; i < sizeof(u64); i++) {
10343 u8 byte = *(((u8 *)&reg) + i);
10344
10345 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10346 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10347 }
10348 return sizeof(struct sc2vlnt);
10349}
10350
10351static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10352 struct ib_vl_weight_elem *vl)
10353{
10354 unsigned int i;
10355
10356 for (i = 0; i < nelems; i++, vl++) {
10357 vl->vl = 0xf;
10358 vl->weight = 0;
10359 }
10360}
10361
10362static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10363{
10364 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10365 DC_SC_VL_VAL(15_0,
10366 0, dp->vlnt[0] & 0xf,
10367 1, dp->vlnt[1] & 0xf,
10368 2, dp->vlnt[2] & 0xf,
10369 3, dp->vlnt[3] & 0xf,
10370 4, dp->vlnt[4] & 0xf,
10371 5, dp->vlnt[5] & 0xf,
10372 6, dp->vlnt[6] & 0xf,
10373 7, dp->vlnt[7] & 0xf,
10374 8, dp->vlnt[8] & 0xf,
10375 9, dp->vlnt[9] & 0xf,
10376 10, dp->vlnt[10] & 0xf,
10377 11, dp->vlnt[11] & 0xf,
10378 12, dp->vlnt[12] & 0xf,
10379 13, dp->vlnt[13] & 0xf,
10380 14, dp->vlnt[14] & 0xf,
10381 15, dp->vlnt[15] & 0xf));
10382 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10383 DC_SC_VL_VAL(31_16,
10384 16, dp->vlnt[16] & 0xf,
10385 17, dp->vlnt[17] & 0xf,
10386 18, dp->vlnt[18] & 0xf,
10387 19, dp->vlnt[19] & 0xf,
10388 20, dp->vlnt[20] & 0xf,
10389 21, dp->vlnt[21] & 0xf,
10390 22, dp->vlnt[22] & 0xf,
10391 23, dp->vlnt[23] & 0xf,
10392 24, dp->vlnt[24] & 0xf,
10393 25, dp->vlnt[25] & 0xf,
10394 26, dp->vlnt[26] & 0xf,
10395 27, dp->vlnt[27] & 0xf,
10396 28, dp->vlnt[28] & 0xf,
10397 29, dp->vlnt[29] & 0xf,
10398 30, dp->vlnt[30] & 0xf,
10399 31, dp->vlnt[31] & 0xf));
10400}
10401
10402static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10403 u16 limit)
10404{
10405 if (limit != 0)
10406 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10407 what, (int)limit, idx);
10408}
10409
10410/* change only the shared limit portion of SendCmGLobalCredit */
10411static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10412{
10413 u64 reg;
10414
10415 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10416 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10417 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10418 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10419}
10420
10421/* change only the total credit limit portion of SendCmGLobalCredit */
10422static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10423{
10424 u64 reg;
10425
10426 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10427 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10428 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10429 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10430}
10431
10432/* set the given per-VL shared limit */
10433static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10434{
10435 u64 reg;
10436 u32 addr;
10437
10438 if (vl < TXE_NUM_DATA_VL)
10439 addr = SEND_CM_CREDIT_VL + (8 * vl);
10440 else
10441 addr = SEND_CM_CREDIT_VL15;
10442
10443 reg = read_csr(dd, addr);
10444 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10445 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10446 write_csr(dd, addr, reg);
10447}
10448
10449/* set the given per-VL dedicated limit */
10450static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10451{
10452 u64 reg;
10453 u32 addr;
10454
10455 if (vl < TXE_NUM_DATA_VL)
10456 addr = SEND_CM_CREDIT_VL + (8 * vl);
10457 else
10458 addr = SEND_CM_CREDIT_VL15;
10459
10460 reg = read_csr(dd, addr);
10461 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10462 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10463 write_csr(dd, addr, reg);
10464}
10465
10466/* spin until the given per-VL status mask bits clear */
10467static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10468 const char *which)
10469{
10470 unsigned long timeout;
10471 u64 reg;
10472
10473 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10474 while (1) {
10475 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10476
10477 if (reg == 0)
10478 return; /* success */
10479 if (time_after(jiffies, timeout))
10480 break; /* timed out */
10481 udelay(1);
10482 }
10483
10484 dd_dev_err(dd,
10485 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10486 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10487 /*
10488 * If this occurs, it is likely there was a credit loss on the link.
10489 * The only recovery from that is a link bounce.
10490 */
10491 dd_dev_err(dd,
10492 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10493}
10494
10495/*
10496 * The number of credits on the VLs may be changed while everything
10497 * is "live", but the following algorithm must be followed due to
10498 * how the hardware is actually implemented. In particular,
10499 * Return_Credit_Status[] is the only correct status check.
10500 *
10501 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10502 * set Global_Shared_Credit_Limit = 0
10503 * use_all_vl = 1
10504 * mask0 = all VLs that are changing either dedicated or shared limits
10505 * set Shared_Limit[mask0] = 0
10506 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10507 * if (changing any dedicated limit)
10508 * mask1 = all VLs that are lowering dedicated limits
10509 * lower Dedicated_Limit[mask1]
10510 * spin until Return_Credit_Status[mask1] == 0
10511 * raise Dedicated_Limits
10512 * raise Shared_Limits
10513 * raise Global_Shared_Credit_Limit
10514 *
10515 * lower = if the new limit is lower, set the limit to the new value
10516 * raise = if the new limit is higher than the current value (may be changed
10517 * earlier in the algorithm), set the new limit to the new value
10518 */
10519static int set_buffer_control(struct hfi1_devdata *dd,
10520 struct buffer_control *new_bc)
10521{
10522 u64 changing_mask, ld_mask, stat_mask;
10523 int change_count;
10524 int i, use_all_mask;
10525 int this_shared_changing;
10526 /*
10527 * A0: add the variable any_shared_limit_changing below and in the
10528 * algorithm above. If removing A0 support, it can be removed.
10529 */
10530 int any_shared_limit_changing;
10531 struct buffer_control cur_bc;
10532 u8 changing[OPA_MAX_VLS];
10533 u8 lowering_dedicated[OPA_MAX_VLS];
10534 u16 cur_total;
10535 u32 new_total = 0;
10536 const u64 all_mask =
10537 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10538 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10539 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10540 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10541 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10542 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10543 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10544 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10545 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10546
10547#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10548#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10549
10550
10551 /* find the new total credits, do sanity check on unused VLs */
10552 for (i = 0; i < OPA_MAX_VLS; i++) {
10553 if (valid_vl(i)) {
10554 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10555 continue;
10556 }
10557 nonzero_msg(dd, i, "dedicated",
10558 be16_to_cpu(new_bc->vl[i].dedicated));
10559 nonzero_msg(dd, i, "shared",
10560 be16_to_cpu(new_bc->vl[i].shared));
10561 new_bc->vl[i].dedicated = 0;
10562 new_bc->vl[i].shared = 0;
10563 }
10564 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10565
10566 /* fetch the current values */
10567 get_buffer_control(dd, &cur_bc, &cur_total);
10568
10569 /*
10570 * Create the masks we will use.
10571 */
10572 memset(changing, 0, sizeof(changing));
10573 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10574 /* NOTE: Assumes that the individual VL bits are adjacent and in
10575 increasing order */
10576 stat_mask =
10577 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10578 changing_mask = 0;
10579 ld_mask = 0;
10580 change_count = 0;
10581 any_shared_limit_changing = 0;
10582 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10583 if (!valid_vl(i))
10584 continue;
10585 this_shared_changing = new_bc->vl[i].shared
10586 != cur_bc.vl[i].shared;
10587 if (this_shared_changing)
10588 any_shared_limit_changing = 1;
10589 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10590 || this_shared_changing) {
10591 changing[i] = 1;
10592 changing_mask |= stat_mask;
10593 change_count++;
10594 }
10595 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10596 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10597 lowering_dedicated[i] = 1;
10598 ld_mask |= stat_mask;
10599 }
10600 }
10601
10602 /* bracket the credit change with a total adjustment */
10603 if (new_total > cur_total)
10604 set_global_limit(dd, new_total);
10605
10606 /*
10607 * Start the credit change algorithm.
10608 */
10609 use_all_mask = 0;
10610 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10611 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10612 (is_ax(dd) && any_shared_limit_changing)) {
10613 set_global_shared(dd, 0);
10614 cur_bc.overall_shared_limit = 0;
10615 use_all_mask = 1;
10616 }
10617
10618 for (i = 0; i < NUM_USABLE_VLS; i++) {
10619 if (!valid_vl(i))
10620 continue;
10621
10622 if (changing[i]) {
10623 set_vl_shared(dd, i, 0);
10624 cur_bc.vl[i].shared = 0;
10625 }
10626 }
10627
10628 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10629 "shared");
10630
10631 if (change_count > 0) {
10632 for (i = 0; i < NUM_USABLE_VLS; i++) {
10633 if (!valid_vl(i))
10634 continue;
10635
10636 if (lowering_dedicated[i]) {
10637 set_vl_dedicated(dd, i,
10638 be16_to_cpu(new_bc->vl[i].dedicated));
10639 cur_bc.vl[i].dedicated =
10640 new_bc->vl[i].dedicated;
10641 }
10642 }
10643
10644 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10645
10646 /* now raise all dedicated that are going up */
10647 for (i = 0; i < NUM_USABLE_VLS; i++) {
10648 if (!valid_vl(i))
10649 continue;
10650
10651 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10652 be16_to_cpu(cur_bc.vl[i].dedicated))
10653 set_vl_dedicated(dd, i,
10654 be16_to_cpu(new_bc->vl[i].dedicated));
10655 }
10656 }
10657
10658 /* next raise all shared that are going up */
10659 for (i = 0; i < NUM_USABLE_VLS; i++) {
10660 if (!valid_vl(i))
10661 continue;
10662
10663 if (be16_to_cpu(new_bc->vl[i].shared) >
10664 be16_to_cpu(cur_bc.vl[i].shared))
10665 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10666 }
10667
10668 /* finally raise the global shared */
10669 if (be16_to_cpu(new_bc->overall_shared_limit) >
10670 be16_to_cpu(cur_bc.overall_shared_limit))
10671 set_global_shared(dd,
10672 be16_to_cpu(new_bc->overall_shared_limit));
10673
10674 /* bracket the credit change with a total adjustment */
10675 if (new_total < cur_total)
10676 set_global_limit(dd, new_total);
10677 return 0;
10678}
10679
10680/*
10681 * Read the given fabric manager table. Return the size of the
10682 * table (in bytes) on success, and a negative error code on
10683 * failure.
10684 */
10685int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10686
10687{
10688 int size;
10689 struct vl_arb_cache *vlc;
10690
10691 switch (which) {
10692 case FM_TBL_VL_HIGH_ARB:
10693 size = 256;
10694 /*
10695 * OPA specifies 128 elements (of 2 bytes each), though
10696 * HFI supports only 16 elements in h/w.
10697 */
10698 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10699 vl_arb_get_cache(vlc, t);
10700 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10701 break;
10702 case FM_TBL_VL_LOW_ARB:
10703 size = 256;
10704 /*
10705 * OPA specifies 128 elements (of 2 bytes each), though
10706 * HFI supports only 16 elements in h/w.
10707 */
10708 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10709 vl_arb_get_cache(vlc, t);
10710 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10711 break;
10712 case FM_TBL_BUFFER_CONTROL:
10713 size = get_buffer_control(ppd->dd, t, NULL);
10714 break;
10715 case FM_TBL_SC2VLNT:
10716 size = get_sc2vlnt(ppd->dd, t);
10717 break;
10718 case FM_TBL_VL_PREEMPT_ELEMS:
10719 size = 256;
10720 /* OPA specifies 128 elements, of 2 bytes each */
10721 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10722 break;
10723 case FM_TBL_VL_PREEMPT_MATRIX:
10724 size = 256;
10725 /*
10726 * OPA specifies that this is the same size as the VL
10727 * arbitration tables (i.e., 256 bytes).
10728 */
10729 break;
10730 default:
10731 return -EINVAL;
10732 }
10733 return size;
10734}
10735
10736/*
10737 * Write the given fabric manager table.
10738 */
10739int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10740{
10741 int ret = 0;
10742 struct vl_arb_cache *vlc;
10743
10744 switch (which) {
10745 case FM_TBL_VL_HIGH_ARB:
10746 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10747 if (vl_arb_match_cache(vlc, t)) {
10748 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10749 break;
10750 }
10751 vl_arb_set_cache(vlc, t);
10752 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10753 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10754 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10755 break;
10756 case FM_TBL_VL_LOW_ARB:
10757 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10758 if (vl_arb_match_cache(vlc, t)) {
10759 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10760 break;
10761 }
10762 vl_arb_set_cache(vlc, t);
10763 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10764 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10765 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10766 break;
10767 case FM_TBL_BUFFER_CONTROL:
10768 ret = set_buffer_control(ppd->dd, t);
10769 break;
10770 case FM_TBL_SC2VLNT:
10771 set_sc2vlnt(ppd->dd, t);
10772 break;
10773 default:
10774 ret = -EINVAL;
10775 }
10776 return ret;
10777}
10778
10779/*
10780 * Disable all data VLs.
10781 *
10782 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10783 */
10784static int disable_data_vls(struct hfi1_devdata *dd)
10785{
10786 if (is_ax(dd))
10787 return 1;
10788
10789 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10790
10791 return 0;
10792}
10793
10794/*
10795 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10796 * Just re-enables all data VLs (the "fill" part happens
10797 * automatically - the name was chosen for symmetry with
10798 * stop_drain_data_vls()).
10799 *
10800 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10801 */
10802int open_fill_data_vls(struct hfi1_devdata *dd)
10803{
10804 if (is_ax(dd))
10805 return 1;
10806
10807 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10808
10809 return 0;
10810}
10811
10812/*
10813 * drain_data_vls() - assumes that disable_data_vls() has been called,
10814 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
10815 * engines to drop to 0.
10816 */
10817static void drain_data_vls(struct hfi1_devdata *dd)
10818{
10819 sc_wait(dd);
10820 sdma_wait(dd);
10821 pause_for_credit_return(dd);
10822}
10823
10824/*
10825 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10826 *
10827 * Use open_fill_data_vls() to resume using data VLs. This pair is
10828 * meant to be used like this:
10829 *
10830 * stop_drain_data_vls(dd);
10831 * // do things with per-VL resources
10832 * open_fill_data_vls(dd);
10833 */
10834int stop_drain_data_vls(struct hfi1_devdata *dd)
10835{
10836 int ret;
10837
10838 ret = disable_data_vls(dd);
10839 if (ret == 0)
10840 drain_data_vls(dd);
10841
10842 return ret;
10843}
10844
10845/*
10846 * Convert a nanosecond time to a cclock count. No matter how slow
10847 * the cclock, a non-zero ns will always have a non-zero result.
10848 */
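/*
 * Illustrative arithmetic (the period here is hypothetical, not the real
 * ASIC_CCLOCK_PS): with a 1250 ps cclock period, 1000 ns converts to
 * (1000 * 1000) / 1250 = 800 cclocks, while 1 ns computes to 0 and is
 * rounded up to 1 cclock.
 */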
10849u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10850{
10851 u32 cclocks;
10852
10853 if (dd->icode == ICODE_FPGA_EMULATION)
10854 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10855 else /* simulation pretends to be ASIC */
10856 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10857 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10858 cclocks = 1;
10859 return cclocks;
10860}
10861
10862/*
10863 * Convert a cclock count to nanoseconds. No matter how slow
10864 * the cclock, a non-zero cclocks will always have a non-zero result.
10865 */
10866u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10867{
10868 u32 ns;
10869
10870 if (dd->icode == ICODE_FPGA_EMULATION)
10871 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10872 else /* simulation pretends to be ASIC */
10873 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10874 if (cclocks && !ns)
10875 ns = 1;
10876 return ns;
10877}
10878
10879/*
10880 * Dynamically adjust the receive interrupt timeout for a context based on
10881 * incoming packet rate.
10882 *
10883 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10884 */
10885static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10886{
10887 struct hfi1_devdata *dd = rcd->dd;
10888 u32 timeout = rcd->rcvavail_timeout;
10889
10890 /*
10891 * This algorithm doubles or halves the timeout depending on whether
10892 * the number of packets received in this interrupt was less than, or
10893 * greater than or equal to, the interrupt count.
10894 *
10895 * The calculations below do not allow a steady state to be achieved.
10896 * Only at the endpoints is it possible to have an unchanging
10897 * timeout.
10898 */
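	/*
	 * Illustrative behaviour: with a current timeout of 8, too few
	 * packets halves it to 4; enough packets doubles it to 16, capped
	 * at dd->rcv_intr_timeout_csr.
	 */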
10899 if (npkts < rcv_intr_count) {
10900 /*
10901 * Not enough packets arrived before the timeout, adjust
10902 * timeout downward.
10903 */
10904 if (timeout < 2) /* already at minimum? */
10905 return;
10906 timeout >>= 1;
10907 } else {
10908 /*
10909 * More than enough packets arrived before the timeout, adjust
10910 * timeout upward.
10911 */
10912 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10913 return;
10914 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10915 }
10916
10917 rcd->rcvavail_timeout = timeout;
10918 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10919 been verified to be in range */
10920 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10921 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10922}
10923
10924void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10925 u32 intr_adjust, u32 npkts)
10926{
10927 struct hfi1_devdata *dd = rcd->dd;
10928 u64 reg;
10929 u32 ctxt = rcd->ctxt;
10930
10931 /*
10932 * Need to write timeout register before updating RcvHdrHead to ensure
10933 * that a new value is used when the HW decides to restart counting.
10934 */
10935 if (intr_adjust)
10936 adjust_rcv_timeout(rcd, npkts);
10937 if (updegr) {
10938 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10939 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10940 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10941 }
10942 mmiowb();
10943 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10944 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10945 << RCV_HDR_HEAD_HEAD_SHIFT);
10946 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10947 mmiowb();
10948}
10949
10950u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10951{
10952 u32 head, tail;
10953
10954 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10955 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10956
10957 if (rcd->rcvhdrtail_kvaddr)
10958 tail = get_rcvhdrtail(rcd);
10959 else
10960 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10961
10962 return head == tail;
10963}
10964
10965/*
10966 * Context Control and Receive Array encoding for buffer size:
10967 * 0x0 invalid
10968 * 0x1 4 KB
10969 * 0x2 8 KB
10970 * 0x3 16 KB
10971 * 0x4 32 KB
10972 * 0x5 64 KB
10973 * 0x6 128 KB
10974 * 0x7 256 KB
10975 * 0x8 512 KB (Receive Array only)
10976 * 0x9 1 MB (Receive Array only)
10977 * 0xa 2 MB (Receive Array only)
10978 *
10979 * 0xB-0xF - reserved (Receive Array only)
10980 *
10981 *
10982 * This routine assumes that the value has already been sanity checked.
10983 */
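/*
 * Example from the table above: encoded_size(64 * 1024) returns 0x5, which
 * hfi1_rcvctrl() below programs into the context's eager buffer size field.
 */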
10984static u32 encoded_size(u32 size)
10985{
10986 switch (size) {
10987 case 4*1024: return 0x1;
10988 case 8*1024: return 0x2;
10989 case 16*1024: return 0x3;
10990 case 32*1024: return 0x4;
10991 case 64*1024: return 0x5;
10992 case 128*1024: return 0x6;
10993 case 256*1024: return 0x7;
10994 case 512*1024: return 0x8;
10995 case 1*1024*1024: return 0x9;
10996 case 2*1024*1024: return 0xa;
10997 }
10998 return 0x1; /* if invalid, go with the minimum size */
10999}
11000
11001void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11002{
11003 struct hfi1_ctxtdata *rcd;
11004 u64 rcvctrl, reg;
11005 int did_enable = 0;
11006
11007 rcd = dd->rcd[ctxt];
11008 if (!rcd)
11009 return;
11010
11011 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11012
11013 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11014 /* if the context already enabled, don't do the extra steps */
11015 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11016 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11017 /* reset the tail and hdr addresses, and sequence count */
11018 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11019 rcd->rcvhdrq_phys);
11020 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11021 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11022 rcd->rcvhdrqtailaddr_phys);
11023 rcd->seq_cnt = 1;
11024
11025 /* reset the cached receive header queue head value */
11026 rcd->head = 0;
11027
11028 /*
11029 * Zero the receive header queue so we don't get false
11030 * positives when checking the sequence number. The
11031 * sequence numbers could land exactly on the same spot.
11032 * E.g. a rcd restart before the receive header wrapped.
11033 */
11034 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11035
11036 /* starting timeout */
11037 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11038
11039 /* enable the context */
11040 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11041
11042 /* clean the egr buffer size first */
11043 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11044 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11045 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11046 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11047
11048 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11049 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11050 did_enable = 1;
11051
11052 /* zero RcvEgrIndexHead */
11053 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11054
11055 /* set eager count and base index */
11056 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11057 & RCV_EGR_CTRL_EGR_CNT_MASK)
11058 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11059 (((rcd->eager_base >> RCV_SHIFT)
11060 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11061 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11062 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11063
11064 /*
11065 * Set TID (expected) count and base index.
11066 * rcd->expected_count is set to individual RcvArray entries,
11067 * not pairs, and the CSR takes a pair-count in groups of
11068 * four, so divide by 8.
11069 */
11070 reg = (((rcd->expected_count >> RCV_SHIFT)
11071 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11072 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11073 (((rcd->expected_base >> RCV_SHIFT)
11074 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11075 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11076 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11077 if (ctxt == HFI1_CTRL_CTXT)
11078 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11079 }
11080 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11081 write_csr(dd, RCV_VL15, 0);
11082 /*
11083 * When receive context is being disabled turn on tail
11084 * update with a dummy tail address and then disable
11085 * receive context.
11086 */
11087 if (dd->rcvhdrtail_dummy_physaddr) {
11088 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11089 dd->rcvhdrtail_dummy_physaddr);
11090 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11091 }
11092
11093 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11094 }
11095 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11096 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11097 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11098 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11099 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11100 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11101 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11102 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11103 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11104 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11105 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11106 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11107 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11108 /* In one-packet-per-eager mode, the size comes from
11109 the RcvArray entry. */
11110 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11111 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11112 }
11113 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11114 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11115 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11116 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11117 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11118 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11119 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11120 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11121 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11122 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11123 rcd->rcvctrl = rcvctrl;
11124 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11125 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11126
11127 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11128 if (did_enable
11129 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11130 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11131 if (reg != 0) {
11132 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11133 ctxt, reg);
11134 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11135 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11136 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11137 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11138 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11139 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11140 ctxt, reg, reg == 0 ? "not" : "still");
11141 }
11142 }
11143
11144 if (did_enable) {
11145 /*
11146 * The interrupt timeout and count must be set after
11147 * the context is enabled to take effect.
11148 */
11149 /* set interrupt timeout */
11150 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11151 (u64)rcd->rcvavail_timeout <<
11152 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11153
11154 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11155 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11156 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11157 }
11158
11159 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11160 /*
11161 * If the context has been disabled and the Tail Update has
11162 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11163 * so it doesn't contain an address that is invalid.
11164 */
11165 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11166 dd->rcvhdrtail_dummy_physaddr);
11167}
11168
11169u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11170 u64 **cntrp)
11171{
11172 int ret;
11173 u64 val = 0;
11174
11175 if (namep) {
11176 ret = dd->cntrnameslen;
11177 if (pos != 0) {
11178 dd_dev_err(dd, "read_cntrs does not support indexing");
11179 return 0;
11180 }
11181 *namep = dd->cntrnames;
11182 } else {
11183 const struct cntr_entry *entry;
11184 int i, j;
11185
11186 ret = (dd->ndevcntrs) * sizeof(u64);
11187 if (pos != 0) {
11188 dd_dev_err(dd, "read_cntrs does not support indexing");
11189 return 0;
11190 }
11191
11192 /* Get the start of the block of counters */
11193 *cntrp = dd->cntrs;
11194
11195 /*
11196 * Now go and fill in each counter in the block.
11197 */
11198 for (i = 0; i < DEV_CNTR_LAST; i++) {
11199 entry = &dev_cntrs[i];
11200 hfi1_cdbg(CNTR, "reading %s", entry->name);
11201 if (entry->flags & CNTR_DISABLED) {
11202 /* Nothing */
11203 hfi1_cdbg(CNTR, "\tDisabled\n");
11204 } else {
11205 if (entry->flags & CNTR_VL) {
11206 hfi1_cdbg(CNTR, "\tPer VL\n");
11207 for (j = 0; j < C_VL_COUNT; j++) {
11208 val = entry->rw_cntr(entry,
11209 dd, j,
11210 CNTR_MODE_R,
11211 0);
11212 hfi1_cdbg(
11213 CNTR,
11214 "\t\tRead 0x%llx for %d\n",
11215 val, j);
11216 dd->cntrs[entry->offset + j] =
11217 val;
11218 }
11219 } else if (entry->flags & CNTR_SDMA) {
11220 hfi1_cdbg(CNTR,
11221 "\t Per SDMA Engine\n");
11222 for (j = 0; j < dd->chip_sdma_engines;
11223 j++) {
11224 val =
11225 entry->rw_cntr(entry, dd, j,
11226 CNTR_MODE_R, 0);
11227 hfi1_cdbg(CNTR,
11228 "\t\tRead 0x%llx for %d\n",
11229 val, j);
11230 dd->cntrs[entry->offset + j] =
11231 val;
11232 }
11233 } else {
11234 val = entry->rw_cntr(entry, dd,
11235 CNTR_INVALID_VL,
11236 CNTR_MODE_R, 0);
11237 dd->cntrs[entry->offset] = val;
11238 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11239 }
11240 }
11241 }
11242 }
11243 return ret;
11244}
11245
11246/*
11247 * Used by sysfs to create files for hfi stats to read
11248 */
11249u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11250 char **namep, u64 **cntrp)
11251{
11252 int ret;
11253 u64 val = 0;
11254
11255 if (namep) {
11256 ret = dd->portcntrnameslen;
11257 if (pos != 0) {
11258 dd_dev_err(dd, "index not supported");
11259 return 0;
11260 }
11261 *namep = dd->portcntrnames;
11262 } else {
11263 const struct cntr_entry *entry;
11264 struct hfi1_pportdata *ppd;
11265 int i, j;
11266
11267 ret = (dd->nportcntrs) * sizeof(u64);
11268 if (pos != 0) {
11269 dd_dev_err(dd, "indexing not supported");
11270 return 0;
11271 }
11272 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11273 *cntrp = ppd->cntrs;
11274
11275 for (i = 0; i < PORT_CNTR_LAST; i++) {
11276 entry = &port_cntrs[i];
11277 hfi1_cdbg(CNTR, "reading %s", entry->name);
11278 if (entry->flags & CNTR_DISABLED) {
11279 /* Nothing */
11280 hfi1_cdbg(CNTR, "\tDisabled\n");
11281 continue;
11282 }
11283
11284 if (entry->flags & CNTR_VL) {
11285 hfi1_cdbg(CNTR, "\tPer VL");
11286 for (j = 0; j < C_VL_COUNT; j++) {
11287 val = entry->rw_cntr(entry, ppd, j,
11288 CNTR_MODE_R,
11289 0);
11290 hfi1_cdbg(
11291 CNTR,
11292 "\t\tRead 0x%llx for %d",
11293 val, j);
11294 ppd->cntrs[entry->offset + j] = val;
11295 }
11296 } else {
11297 val = entry->rw_cntr(entry, ppd,
11298 CNTR_INVALID_VL,
11299 CNTR_MODE_R,
11300 0);
11301 ppd->cntrs[entry->offset] = val;
11302 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11303 }
11304 }
11305 }
11306 return ret;
11307}
11308
11309static void free_cntrs(struct hfi1_devdata *dd)
11310{
11311 struct hfi1_pportdata *ppd;
11312 int i;
11313
11314 if (dd->synth_stats_timer.data)
11315 del_timer_sync(&dd->synth_stats_timer);
11316 dd->synth_stats_timer.data = 0;
11317 ppd = (struct hfi1_pportdata *)(dd + 1);
11318 for (i = 0; i < dd->num_pports; i++, ppd++) {
11319 kfree(ppd->cntrs);
11320 kfree(ppd->scntrs);
11321 free_percpu(ppd->ibport_data.rc_acks);
11322 free_percpu(ppd->ibport_data.rc_qacks);
11323 free_percpu(ppd->ibport_data.rc_delayed_comp);
11324 ppd->cntrs = NULL;
11325 ppd->scntrs = NULL;
11326 ppd->ibport_data.rc_acks = NULL;
11327 ppd->ibport_data.rc_qacks = NULL;
11328 ppd->ibport_data.rc_delayed_comp = NULL;
11329 }
11330 kfree(dd->portcntrnames);
11331 dd->portcntrnames = NULL;
11332 kfree(dd->cntrs);
11333 dd->cntrs = NULL;
11334 kfree(dd->scntrs);
11335 dd->scntrs = NULL;
11336 kfree(dd->cntrnames);
11337 dd->cntrnames = NULL;
11338}
11339
11340#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11341#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11342
11343static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11344 u64 *psval, void *context, int vl)
11345{
11346 u64 val;
11347 u64 sval = *psval;
11348
11349 if (entry->flags & CNTR_DISABLED) {
11350 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11351 return 0;
11352 }
11353
11354 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11355
11356 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11357
11358 /* If it's a synthetic counter there is more work we need to do */
11359 if (entry->flags & CNTR_SYNTH) {
11360 if (sval == CNTR_MAX) {
11361 /* No need to read already saturated */
11362 return CNTR_MAX;
11363 }
11364
11365 if (entry->flags & CNTR_32BIT) {
11366 /* 32bit counters can wrap multiple times */
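			/*
			 * Worked example (illustrative numbers): if the saved
			 * synthetic value is 0x1_0000_0010 (upper = 1,
			 * lower = 0x10) and the h/w now reads 0x8, the h/w has
			 * wrapped, so upper becomes 2 and the returned value
			 * is 0x2_0000_0008.
			 */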
11367 u64 upper = sval >> 32;
11368 u64 lower = (sval << 32) >> 32;
11369
11370 if (lower > val) { /* hw wrapped */
11371 if (upper == CNTR_32BIT_MAX)
11372 val = CNTR_MAX;
11373 else
11374 upper++;
11375 }
11376
11377 if (val != CNTR_MAX)
11378 val = (upper << 32) | val;
11379
11380 } else {
11381 /* If we rolled we are saturated */
11382 if ((val < sval) || (val > CNTR_MAX))
11383 val = CNTR_MAX;
11384 }
11385 }
11386
11387 *psval = val;
11388
11389 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11390
11391 return val;
11392}
11393
11394static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11395 struct cntr_entry *entry,
11396 u64 *psval, void *context, int vl, u64 data)
11397{
11398 u64 val;
11399
11400 if (entry->flags & CNTR_DISABLED) {
11401 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11402 return 0;
11403 }
11404
11405 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11406
11407 if (entry->flags & CNTR_SYNTH) {
11408 *psval = data;
11409 if (entry->flags & CNTR_32BIT) {
11410 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11411 (data << 32) >> 32);
11412 val = data; /* return the full 64bit value */
11413 } else {
11414 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11415 data);
11416 }
11417 } else {
11418 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11419 }
11420
11421 *psval = val;
11422
11423 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11424
11425 return val;
11426}
11427
11428u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11429{
11430 struct cntr_entry *entry;
11431 u64 *sval;
11432
11433 entry = &dev_cntrs[index];
11434 sval = dd->scntrs + entry->offset;
11435
11436 if (vl != CNTR_INVALID_VL)
11437 sval += vl;
11438
11439 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11440}
11441
11442u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11443{
11444 struct cntr_entry *entry;
11445 u64 *sval;
11446
11447 entry = &dev_cntrs[index];
11448 sval = dd->scntrs + entry->offset;
11449
11450 if (vl != CNTR_INVALID_VL)
11451 sval += vl;
11452
11453 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11454}
11455
11456u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11457{
11458 struct cntr_entry *entry;
11459 u64 *sval;
11460
11461 entry = &port_cntrs[index];
11462 sval = ppd->scntrs + entry->offset;
11463
11464 if (vl != CNTR_INVALID_VL)
11465 sval += vl;
11466
11467 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11468 (index <= C_RCV_HDR_OVF_LAST)) {
11469 /* We do not want to bother for disabled contexts */
11470 return 0;
11471 }
11472
11473 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11474}
11475
11476u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11477{
11478 struct cntr_entry *entry;
11479 u64 *sval;
11480
11481 entry = &port_cntrs[index];
11482 sval = ppd->scntrs + entry->offset;
11483
11484 if (vl != CNTR_INVALID_VL)
11485 sval += vl;
11486
11487 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11488 (index <= C_RCV_HDR_OVF_LAST)) {
11489 /* We do not want to bother for disabled contexts */
11490 return 0;
11491 }
11492
11493 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11494}
11495
11496static void update_synth_timer(unsigned long opaque)
11497{
11498 u64 cur_tx;
11499 u64 cur_rx;
11500 u64 total_flits;
11501 u8 update = 0;
11502 int i, j, vl;
11503 struct hfi1_pportdata *ppd;
11504 struct cntr_entry *entry;
11505
11506 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11507
11508 /*
11509 * Rather than keep beating on the CSRs pick a minimal set that we can
11510 * check to watch for potential roll over. We can do this by looking at
11511 * the number of flits sent/recv. If the total flits exceeds 32bits then
11512 * we have to iterate all the counters and update.
11513 */
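	/*
	 * Example of the tripwire check (illustrative numbers): if the last
	 * snapshot was tx=0x100, rx=0x200 and the current reads are
	 * tx=0x80000100, rx=0x80000200, total_flits is 0x100000000, which is
	 * >= CNTR_32BIT_MAX, so a full sweep of the counters is forced.
	 */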
11514 entry = &dev_cntrs[C_DC_RCV_FLITS];
11515 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11516
11517 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11518 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11519
11520 hfi1_cdbg(
11521 CNTR,
11522 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11523 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11524
11525 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11526 /*
11527 * May not be strictly necessary to update but it won't hurt and
11528 * simplifies the logic here.
11529 */
11530 update = 1;
11531 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11532 dd->unit);
11533 } else {
11534 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11535 hfi1_cdbg(CNTR,
11536 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11537 total_flits, (u64)CNTR_32BIT_MAX);
11538 if (total_flits >= CNTR_32BIT_MAX) {
11539 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11540 dd->unit);
11541 update = 1;
11542 }
11543 }
11544
11545 if (update) {
11546 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11547 for (i = 0; i < DEV_CNTR_LAST; i++) {
11548 entry = &dev_cntrs[i];
11549 if (entry->flags & CNTR_VL) {
11550 for (vl = 0; vl < C_VL_COUNT; vl++)
11551 read_dev_cntr(dd, i, vl);
11552 } else {
11553 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11554 }
11555 }
11556 ppd = (struct hfi1_pportdata *)(dd + 1);
11557 for (i = 0; i < dd->num_pports; i++, ppd++) {
11558 for (j = 0; j < PORT_CNTR_LAST; j++) {
11559 entry = &port_cntrs[j];
11560 if (entry->flags & CNTR_VL) {
11561 for (vl = 0; vl < C_VL_COUNT; vl++)
11562 read_port_cntr(ppd, j, vl);
11563 } else {
11564 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11565 }
11566 }
11567 }
11568
11569 /*
11570 * We want the value in the register. The goal is to keep track
11571 * of the number of "ticks" not the counter value. In other
11572 * words if the register rolls we want to notice it and go ahead
11573 * and force an update.
11574 */
11575 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11576 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11577 CNTR_MODE_R, 0);
11578
11579 entry = &dev_cntrs[C_DC_RCV_FLITS];
11580 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11581 CNTR_MODE_R, 0);
11582
11583 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11584 dd->unit, dd->last_tx, dd->last_rx);
11585
11586 } else {
11587 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11588 }
11589
11590 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11591}
11592
11593#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11594static int init_cntrs(struct hfi1_devdata *dd)
11595{
11596 int i, rcv_ctxts, j;
11597 size_t sz;
11598 char *p;
11599 char name[C_MAX_NAME];
11600 struct hfi1_pportdata *ppd;
11601
11602 /* set up the stats timer; the add_timer is done at the end */
11603 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11604 (unsigned long)dd);
11605
11606 /***********************/
11607 /* per device counters */
11608 /***********************/
11609
11610 /* size names and determine how many we have*/
11611 dd->ndevcntrs = 0;
11612 sz = 0;
11613
11614 for (i = 0; i < DEV_CNTR_LAST; i++) {
11615 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11616 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11617 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11618 continue;
11619 }
11620
11621 if (dev_cntrs[i].flags & CNTR_VL) {
11622 hfi1_dbg_early("\tProcessing VL cntr\n");
11623 dev_cntrs[i].offset = dd->ndevcntrs;
11624 for (j = 0; j < C_VL_COUNT; j++) {
11625 memset(name, '\0', C_MAX_NAME);
11626 snprintf(name, C_MAX_NAME, "%s%d",
11627 dev_cntrs[i].name,
11628 vl_from_idx(j));
11629 sz += strlen(name);
11630 sz++;
11631 hfi1_dbg_early("\t\t%s\n", name);
11632 dd->ndevcntrs++;
11633 }
11634 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11635 hfi1_dbg_early(
11636 "\tProcessing per SDE counters, chip engines %u\n",
11637 dd->chip_sdma_engines);
11638 dev_cntrs[i].offset = dd->ndevcntrs;
11639 for (j = 0; j < dd->chip_sdma_engines; j++) {
11640 memset(name, '\0', C_MAX_NAME);
11641 snprintf(name, C_MAX_NAME, "%s%d",
11642 dev_cntrs[i].name, j);
11643 sz += strlen(name);
11644 sz++;
11645 hfi1_dbg_early("\t\t%s\n", name);
11646 dd->ndevcntrs++;
11647 }
11648 } else {
11649 /* +1 for newline */
11650 sz += strlen(dev_cntrs[i].name) + 1;
11651 dev_cntrs[i].offset = dd->ndevcntrs;
11652 dd->ndevcntrs++;
11653 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11654 }
11655 }
11656
11657 /* allocate space for the counter values */
11658 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11659 if (!dd->cntrs)
11660 goto bail;
11661
11662 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11663 if (!dd->scntrs)
11664 goto bail;
11665
11666
11667 /* allocate space for the counter names */
11668 dd->cntrnameslen = sz;
11669 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11670 if (!dd->cntrnames)
11671 goto bail;
11672
11673 /* fill in the names */
11674 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11675 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11676 /* Nothing */
11677 } else {
11678 if (dev_cntrs[i].flags & CNTR_VL) {
11679 for (j = 0; j < C_VL_COUNT; j++) {
11680 memset(name, '\0', C_MAX_NAME);
11681 snprintf(name, C_MAX_NAME, "%s%d",
11682 dev_cntrs[i].name,
11683 vl_from_idx(j));
11684 memcpy(p, name, strlen(name));
11685 p += strlen(name);
11686 *p++ = '\n';
11687 }
11688 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11689 for (j = 0; j < TXE_NUM_SDMA_ENGINES;
11690 j++) {
11691 memset(name, '\0', C_MAX_NAME);
11692 snprintf(name, C_MAX_NAME, "%s%d",
11693 dev_cntrs[i].name, j);
11694 memcpy(p, name, strlen(name));
11695 p += strlen(name);
11696 *p++ = '\n';
11697 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011698 } else {
11699 memcpy(p, dev_cntrs[i].name,
11700 strlen(dev_cntrs[i].name));
11701 p += strlen(dev_cntrs[i].name);
11702 *p++ = '\n';
11703 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011704 }
11705 }
11706
11707 /*********************/
11708 /* per port counters */
11709 /*********************/
11710
11711 /*
11712 * Go through the counters for the overflows and disable the ones we
11713 * don't need. This varies based on platform so we need to do it
11714 * dynamically here.
11715 */
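	/*
	 * Illustrative example (actual values are platform dependent): with
	 * num_rcv_contexts == 16, entries C_RCV_HDR_OVF_FIRST + 16 through
	 * C_RCV_HDR_OVF_LAST are marked CNTR_DISABLED here, so the sizing
	 * loop below skips them and they take no space in the name buffer.
	 */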
11716 rcv_ctxts = dd->num_rcv_contexts;
11717 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11718 i <= C_RCV_HDR_OVF_LAST; i++) {
11719 port_cntrs[i].flags |= CNTR_DISABLED;
11720 }
11721
11722	/* size port counter names and determine how many we have */
11723 sz = 0;
11724 dd->nportcntrs = 0;
11725 for (i = 0; i < PORT_CNTR_LAST; i++) {
11726 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11727 if (port_cntrs[i].flags & CNTR_DISABLED) {
11728 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11729 continue;
11730 }
11731
11732 if (port_cntrs[i].flags & CNTR_VL) {
11733 hfi1_dbg_early("\tProcessing VL cntr\n");
11734 port_cntrs[i].offset = dd->nportcntrs;
11735 for (j = 0; j < C_VL_COUNT; j++) {
11736 memset(name, '\0', C_MAX_NAME);
11737 snprintf(name, C_MAX_NAME, "%s%d",
11738 port_cntrs[i].name,
11739 vl_from_idx(j));
11740 sz += strlen(name);
11741 sz++;
11742 hfi1_dbg_early("\t\t%s\n", name);
11743 dd->nportcntrs++;
11744 }
11745 } else {
11746 /* +1 for newline */
11747 sz += strlen(port_cntrs[i].name) + 1;
11748 port_cntrs[i].offset = dd->nportcntrs;
11749 dd->nportcntrs++;
11750 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11751 }
11752 }
11753
11754 /* allocate space for the counter names */
11755 dd->portcntrnameslen = sz;
11756 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11757 if (!dd->portcntrnames)
11758 goto bail;
11759
11760 /* fill in port cntr names */
11761 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11762 if (port_cntrs[i].flags & CNTR_DISABLED)
11763 continue;
11764
11765 if (port_cntrs[i].flags & CNTR_VL) {
11766 for (j = 0; j < C_VL_COUNT; j++) {
11767 memset(name, '\0', C_MAX_NAME);
11768 snprintf(name, C_MAX_NAME, "%s%d",
11769 port_cntrs[i].name,
11770 vl_from_idx(j));
11771 memcpy(p, name, strlen(name));
11772 p += strlen(name);
11773 *p++ = '\n';
11774 }
11775 } else {
11776 memcpy(p, port_cntrs[i].name,
11777 strlen(port_cntrs[i].name));
11778 p += strlen(port_cntrs[i].name);
11779 *p++ = '\n';
11780 }
11781 }
11782
11783 /* allocate per port storage for counter values */
11784 ppd = (struct hfi1_pportdata *)(dd + 1);
11785 for (i = 0; i < dd->num_pports; i++, ppd++) {
11786 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11787 if (!ppd->cntrs)
11788 goto bail;
11789
11790 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11791 if (!ppd->scntrs)
11792 goto bail;
11793 }
11794
11795 /* CPU counters need to be allocated and zeroed */
11796 if (init_cpu_counters(dd))
11797 goto bail;
11798
11799 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11800 return 0;
11801bail:
11802 free_cntrs(dd);
11803 return -ENOMEM;
11804}
11805
11806
11807static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11808{
11809 switch (chip_lstate) {
11810 default:
11811 dd_dev_err(dd,
11812 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11813 chip_lstate);
11814 /* fall through */
11815 case LSTATE_DOWN:
11816 return IB_PORT_DOWN;
11817 case LSTATE_INIT:
11818 return IB_PORT_INIT;
11819 case LSTATE_ARMED:
11820 return IB_PORT_ARMED;
11821 case LSTATE_ACTIVE:
11822 return IB_PORT_ACTIVE;
11823 }
11824}
11825
11826u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11827{
11828 /* look at the HFI meta-states only */
11829 switch (chip_pstate & 0xf0) {
11830 default:
11831 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11832 chip_pstate);
11833 /* fall through */
11834 case PLS_DISABLED:
11835 return IB_PORTPHYSSTATE_DISABLED;
11836 case PLS_OFFLINE:
11837 return OPA_PORTPHYSSTATE_OFFLINE;
11838 case PLS_POLLING:
11839 return IB_PORTPHYSSTATE_POLLING;
11840 case PLS_CONFIGPHY:
11841 return IB_PORTPHYSSTATE_TRAINING;
11842 case PLS_LINKUP:
11843 return IB_PORTPHYSSTATE_LINKUP;
11844 case PLS_PHYTEST:
11845 return IB_PORTPHYSSTATE_PHY_TEST;
11846 }
11847}
11848
11849/* return the OPA port logical state name */
11850const char *opa_lstate_name(u32 lstate)
11851{
11852 static const char * const port_logical_names[] = {
11853 "PORT_NOP",
11854 "PORT_DOWN",
11855 "PORT_INIT",
11856 "PORT_ARMED",
11857 "PORT_ACTIVE",
11858 "PORT_ACTIVE_DEFER",
11859 };
11860 if (lstate < ARRAY_SIZE(port_logical_names))
11861 return port_logical_names[lstate];
11862 return "unknown";
11863}
11864
11865/* return the OPA port physical state name */
11866const char *opa_pstate_name(u32 pstate)
11867{
11868 static const char * const port_physical_names[] = {
11869 "PHYS_NOP",
11870 "reserved1",
11871 "PHYS_POLL",
11872 "PHYS_DISABLED",
11873 "PHYS_TRAINING",
11874 "PHYS_LINKUP",
11875 "PHYS_LINK_ERR_RECOVER",
11876 "PHYS_PHY_TEST",
11877 "reserved8",
11878 "PHYS_OFFLINE",
11879 "PHYS_GANGED",
11880 "PHYS_TEST",
11881 };
11882 if (pstate < ARRAY_SIZE(port_physical_names))
11883 return port_physical_names[pstate];
11884 return "unknown";
11885}
11886
11887/*
11888 * Read the hardware link state and set the driver's cached value of it.
11889 * Return the (new) current value.
11890 */
11891u32 get_logical_state(struct hfi1_pportdata *ppd)
11892{
11893 u32 new_state;
11894
11895 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11896 if (new_state != ppd->lstate) {
11897 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11898 opa_lstate_name(new_state), new_state);
11899 ppd->lstate = new_state;
11900 }
11901 /*
11902 * Set port status flags in the page mapped into userspace
11903 * memory. Do it here to ensure a reliable state - this is
11904 * the only function called by all state handling code.
11905 * Always set the flags due to the fact that the cache value
11906 * might have been changed explicitly outside of this
11907 * function.
11908 */
11909 if (ppd->statusp) {
11910 switch (ppd->lstate) {
11911 case IB_PORT_DOWN:
11912 case IB_PORT_INIT:
11913 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11914 HFI1_STATUS_IB_READY);
11915 break;
11916 case IB_PORT_ARMED:
11917 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11918 break;
11919 case IB_PORT_ACTIVE:
11920 *ppd->statusp |= HFI1_STATUS_IB_READY;
11921 break;
11922 }
11923 }
11924 return ppd->lstate;
11925}
11926
11927/**
11928 * wait_logical_linkstate - wait for an IB link state change to occur
11929 * @ppd: port device
11930 * @state: the state to wait for
11931 * @msecs: the number of milliseconds to wait
11932 *
11933 * Wait up to msecs milliseconds for IB link state change to occur.
11934 * For now, take the easy polling route.
11935 * Returns 0 if state reached, otherwise -ETIMEDOUT.
11936 */
11937static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11938 int msecs)
11939{
11940 unsigned long timeout;
11941
11942 timeout = jiffies + msecs_to_jiffies(msecs);
11943 while (1) {
11944 if (get_logical_state(ppd) == state)
11945 return 0;
11946 if (time_after(jiffies, timeout))
11947 break;
11948 msleep(20);
11949 }
11950 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11951
11952 return -ETIMEDOUT;
11953}
11954
11955u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11956{
11957 static u32 remembered_state = 0xff;
11958 u32 pstate;
11959 u32 ib_pstate;
11960
11961 pstate = read_physical_state(ppd->dd);
11962 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11963 if (remembered_state != ib_pstate) {
11964 dd_dev_info(ppd->dd,
11965 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
11966 __func__, opa_pstate_name(ib_pstate), ib_pstate,
11967 pstate);
11968 remembered_state = ib_pstate;
11969 }
11970 return ib_pstate;
11971}
11972
11973/*
11974 * Read/modify/write ASIC_QSFP register bits as selected by mask
11975 * data: 0 or 1 in the positions depending on what needs to be written
11976 * dir: 0 for read, 1 for write
11977 * mask: select by setting
11978 * I2CCLK (bit 0)
11979 * I2CDATA (bit 1)
11980 */
11981u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
11982 u32 mask)
11983{
11984 u64 qsfp_oe, target_oe;
11985
11986 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
11987 if (mask) {
11988 /* We are writing register bits, so lock access */
11989 dir &= mask;
11990 data &= mask;
11991
11992 qsfp_oe = read_csr(dd, target_oe);
11993 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
11994 write_csr(dd, target_oe, qsfp_oe);
11995 }
11996 /* We are exclusively reading bits here, but it is unlikely
11997 * we'll get valid data when we set the direction of the pin
11998	 * in the same call, so the caller should invoke this function
11999	 * again to get valid data
12000 */
12001 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12002}
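/*
 * Illustrative sketch of how hfi1_gpio_mod() is meant to be driven (these
 * calls are examples, not calls made in this file): the first call, with a
 * non-zero mask, updates the output-enable register so I2CCLK (bit 0)
 * becomes a driven output; the second call, with mask == 0, skips the OE
 * write and simply returns the current ASIC_QSFPn_IN value.
 *
 *	hfi1_gpio_mod(dd, target, 0, 0x1, 0x1);
 *	in = hfi1_gpio_mod(dd, target, 0, 0, 0);
 */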
12003
12004#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12005(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12006
12007#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12008(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12009
12010int hfi1_init_ctxt(struct send_context *sc)
12011{
12012 if (sc != NULL) {
12013 struct hfi1_devdata *dd = sc->dd;
12014 u64 reg;
12015 u8 set = (sc->type == SC_USER ?
12016 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12017 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12018 reg = read_kctxt_csr(dd, sc->hw_context,
12019 SEND_CTXT_CHECK_ENABLE);
12020 if (set)
12021 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12022 else
12023 SET_STATIC_RATE_CONTROL_SMASK(reg);
12024 write_kctxt_csr(dd, sc->hw_context,
12025 SEND_CTXT_CHECK_ENABLE, reg);
12026 }
12027 return 0;
12028}
12029
12030int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12031{
12032 int ret = 0;
12033 u64 reg;
12034
12035 if (dd->icode != ICODE_RTL_SILICON) {
12036 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12037 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12038 __func__);
12039 return -EINVAL;
12040 }
12041 reg = read_csr(dd, ASIC_STS_THERM);
12042 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12043 ASIC_STS_THERM_CURR_TEMP_MASK);
12044 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12045 ASIC_STS_THERM_LO_TEMP_MASK);
12046 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12047 ASIC_STS_THERM_HI_TEMP_MASK);
12048 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12049 ASIC_STS_THERM_CRIT_TEMP_MASK);
12050 /* triggers is a 3-bit value - 1 bit per trigger. */
12051 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12052
12053 return ret;
12054}
12055
12056/* ========================================================================= */
12057
12058/*
12059 * Enable/disable chip from delivering interrupts.
12060 */
12061void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12062{
12063 int i;
12064
12065 /*
12066 * In HFI, the mask needs to be 1 to allow interrupts.
12067 */
12068 if (enable) {
12069 u64 cce_int_mask;
12070 const int qsfp1_int_smask = QSFP1_INT % 64;
12071 const int qsfp2_int_smask = QSFP2_INT % 64;
12072
12073 /* enable all interrupts */
12074 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12075 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12076
12077 /*
12078 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
12079 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
12080 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
12081 * the index of the appropriate CSR in the CCEIntMask CSR array
12082 */
12083 cce_int_mask = read_csr(dd, CCE_INT_MASK +
12084 (8*(QSFP1_INT/64)));
12085 if (dd->hfi1_id) {
12086 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
12087 write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
12088 cce_int_mask);
12089 } else {
12090 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
12091 write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
12092 cce_int_mask);
12093 }
12094 } else {
12095 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12096 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12097 }
12098}
12099
12100/*
12101 * Clear all interrupt sources on the chip.
12102 */
12103static void clear_all_interrupts(struct hfi1_devdata *dd)
12104{
12105 int i;
12106
12107 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12108 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12109
12110 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12111 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12112 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12113 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12114 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12115 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12116 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12117 for (i = 0; i < dd->chip_send_contexts; i++)
12118 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12119 for (i = 0; i < dd->chip_sdma_engines; i++)
12120 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12121
12122 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12123 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12124 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12125}
12126
12127/* Move to pcie.c? */
12128static void disable_intx(struct pci_dev *pdev)
12129{
12130 pci_intx(pdev, 0);
12131}
12132
12133static void clean_up_interrupts(struct hfi1_devdata *dd)
12134{
12135 int i;
12136
12137 /* remove irqs - must happen before disabling/turning off */
12138 if (dd->num_msix_entries) {
12139 /* MSI-X */
12140 struct hfi1_msix_entry *me = dd->msix_entries;
12141
12142 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12143 if (me->arg == NULL) /* => no irq, no affinity */
12144 break;
12145 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12146 NULL);
12147 free_irq(me->msix.vector, me->arg);
12148 }
12149 } else {
12150 /* INTx */
12151 if (dd->requested_intx_irq) {
12152 free_irq(dd->pcidev->irq, dd);
12153 dd->requested_intx_irq = 0;
12154 }
12155 }
12156
12157 /* turn off interrupts */
12158 if (dd->num_msix_entries) {
12159 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012160 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012161 } else {
12162 /* INTx */
12163 disable_intx(dd->pcidev);
12164 }
12165
12166 /* clean structures */
12167 for (i = 0; i < dd->num_msix_entries; i++)
12168 free_cpumask_var(dd->msix_entries[i].mask);
12169 kfree(dd->msix_entries);
12170 dd->msix_entries = NULL;
12171 dd->num_msix_entries = 0;
12172}
12173
12174/*
12175 * Remap the interrupt source from the general handler to the given MSI-X
12176 * interrupt.
12177 */
12178static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12179{
12180 u64 reg;
12181 int m, n;
12182
12183 /* clear from the handled mask of the general interrupt */
12184 m = isrc / 64;
12185 n = isrc % 64;
12186 dd->gi_mask[m] &= ~((u64)1 << n);
12187
12188 /* direct the chip source to the given MSI-X interrupt */
12189 m = isrc / 8;
12190 n = isrc % 8;
12191 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12192 reg &= ~((u64)0xff << (8*n));
12193 reg |= ((u64)msix_intr & 0xff) << (8*n);
12194 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12195}
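/*
 * Worked example: remap_intr(dd, 137, 5) clears bit 9 of gi_mask[2]
 * (137 = 2 * 64 + 9), then writes 5 into byte 1 of the CCE_INT_MAP CSR at
 * index 17 (137 = 17 * 8 + 1), steering chip interrupt source 137 to
 * MSI-X vector 5.
 */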
12196
12197static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12198 int engine, int msix_intr)
12199{
12200 /*
12201 * SDMA engine interrupt sources grouped by type, rather than
12202 * engine. Per-engine interrupts are as follows:
12203 * SDMA
12204 * SDMAProgress
12205 * SDMAIdle
12206 */
12207 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12208 msix_intr);
12209 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12210 msix_intr);
12211 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12212 msix_intr);
12213}
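/*
 * Example (assuming TXE_NUM_SDMA_ENGINES is 16): engine 3 owns chip
 * sources IS_SDMA_START + 3, + 19 and + 35 (SDMA, SDMAProgress and
 * SDMAIdle respectively), and the three calls above steer all of them to
 * the same MSI-X vector.
 */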
12214
Mike Marciniszyn77241052015-07-30 15:17:43 -040012215static int request_intx_irq(struct hfi1_devdata *dd)
12216{
12217 int ret;
12218
Jubin John98050712015-11-16 21:59:27 -050012219 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12220 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012221 ret = request_irq(dd->pcidev->irq, general_interrupt,
12222 IRQF_SHARED, dd->intx_name, dd);
12223 if (ret)
12224 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12225 ret);
12226 else
12227 dd->requested_intx_irq = 1;
12228 return ret;
12229}
12230
12231static int request_msix_irqs(struct hfi1_devdata *dd)
12232{
12233 const struct cpumask *local_mask;
12234 cpumask_var_t def, rcv;
12235 bool def_ret, rcv_ret;
12236 int first_general, last_general;
12237 int first_sdma, last_sdma;
12238 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012239 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012240 int rcv_cpu, sdma_cpu;
12241 int i, ret = 0, possible;
12242 int ht;
12243
12244 /* calculate the ranges we are going to use */
12245 first_general = 0;
12246 first_sdma = last_general = first_general + 1;
12247 first_rx = last_sdma = first_sdma + dd->num_sdma;
12248 last_rx = first_rx + dd->n_krcv_queues;
12249
12250 /*
12251 * Interrupt affinity.
12252 *
12253 * non-rcv avail gets a default mask that
12254 * starts as possible cpus with threads reset
12255 * and each rcv avail reset.
12256 *
12257 * rcv avail gets node relative 1 wrapping back
12258 * to the node relative 1 as necessary.
12259 *
12260 */
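	/*
	 * Example layout (hypothetical 12-core/24-thread local node): "def"
	 * starts as the 12 physical cores; each kernel receive context
	 * other than the control context moves one core from "def" to
	 * "rcv", and the cores remaining in "def" are shared by the general
	 * and SDMA interrupts.
	 */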
12261 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12262 /* if first cpu is invalid, use NUMA 0 */
12263 if (cpumask_first(local_mask) >= nr_cpu_ids)
12264 local_mask = topology_core_cpumask(0);
12265
12266 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12267 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12268 if (!def_ret || !rcv_ret)
12269 goto bail;
12270 /* use local mask as default */
12271 cpumask_copy(def, local_mask);
12272 possible = cpumask_weight(def);
12273 /* disarm threads from default */
12274 ht = cpumask_weight(
12275 topology_sibling_cpumask(cpumask_first(local_mask)));
12276 for (i = possible/ht; i < possible; i++)
12277 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012278 /* def now has full cores on chosen node*/
12279 first_cpu = cpumask_first(def);
12280 if (nr_cpu_ids >= first_cpu)
12281 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012282 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012283
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012284 /* One context is reserved as control context */
12285 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012286 cpumask_clear_cpu(curr_cpu, def);
12287 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012288 curr_cpu = cpumask_next(curr_cpu, def);
12289 if (curr_cpu >= nr_cpu_ids)
12290 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012291 }
12292 /* def mask has non-rcv, rcv has recv mask */
12293 rcv_cpu = cpumask_first(rcv);
12294 sdma_cpu = cpumask_first(def);
12295
12296 /*
12297 * Sanity check - the code expects all SDMA chip source
12298 * interrupts to be in the same CSR, starting at bit 0. Verify
12299 * that this is true by checking the bit location of the start.
12300 */
12301 BUILD_BUG_ON(IS_SDMA_START % 64);
12302
12303 for (i = 0; i < dd->num_msix_entries; i++) {
12304 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12305 const char *err_info;
12306 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012307 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012308 void *arg;
12309 int idx;
12310 struct hfi1_ctxtdata *rcd = NULL;
12311 struct sdma_engine *sde = NULL;
12312
12313 /* obtain the arguments to request_irq */
12314 if (first_general <= i && i < last_general) {
12315 idx = i - first_general;
12316 handler = general_interrupt;
12317 arg = dd;
12318 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012319 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012320 err_info = "general";
12321 } else if (first_sdma <= i && i < last_sdma) {
12322 idx = i - first_sdma;
12323 sde = &dd->per_sdma[idx];
12324 handler = sdma_interrupt;
12325 arg = sde;
12326 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012327 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012328 err_info = "sdma";
12329 remap_sdma_interrupts(dd, idx, i);
12330 } else if (first_rx <= i && i < last_rx) {
12331 idx = i - first_rx;
12332 rcd = dd->rcd[idx];
12333 /* no interrupt if no rcd */
12334 if (!rcd)
12335 continue;
12336 /*
12337 * Set the interrupt register and mask for this
12338 * context's interrupt.
12339 */
12340 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12341 rcd->imask = ((u64)1) <<
12342 ((IS_RCVAVAIL_START+idx) % 64);
12343 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012344 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012345 arg = rcd;
12346 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012347 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012348 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012349 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012350 } else {
12351 /* not in our expected range - complain, then
12352 ignore it */
12353 dd_dev_err(dd,
12354 "Unexpected extra MSI-X interrupt %d\n", i);
12355 continue;
12356 }
12357 /* no argument, no interrupt */
12358 if (arg == NULL)
12359 continue;
12360 /* make sure the name is terminated */
12361 me->name[sizeof(me->name)-1] = 0;
12362
Dean Luickf4f30031c2015-10-26 10:28:44 -040012363 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12364 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012365 if (ret) {
12366 dd_dev_err(dd,
12367 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12368 err_info, me->msix.vector, idx, ret);
12369 return ret;
12370 }
12371 /*
12372 * assign arg after request_irq call, so it will be
12373 * cleaned up
12374 */
12375 me->arg = arg;
12376
12377 if (!zalloc_cpumask_var(
12378 &dd->msix_entries[i].mask,
12379 GFP_KERNEL))
12380 goto bail;
12381 if (handler == sdma_interrupt) {
12382 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12383 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012384 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012385 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12386 sdma_cpu = cpumask_next(sdma_cpu, def);
12387 if (sdma_cpu >= nr_cpu_ids)
12388 sdma_cpu = cpumask_first(def);
12389 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012390 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12391 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12392 cpumask_first(def) : rcv_cpu);
12393 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12394 /* map to first default */
12395 cpumask_set_cpu(cpumask_first(def),
12396 dd->msix_entries[i].mask);
12397 } else {
12398 cpumask_set_cpu(rcv_cpu,
12399 dd->msix_entries[i].mask);
12400 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12401 if (rcv_cpu >= nr_cpu_ids)
12402 rcv_cpu = cpumask_first(rcv);
12403 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012404 } else {
12405 /* otherwise first def */
12406 dd_dev_info(dd, "%s cpu %d\n",
12407 err_info, cpumask_first(def));
12408 cpumask_set_cpu(
12409 cpumask_first(def), dd->msix_entries[i].mask);
12410 }
12411 irq_set_affinity_hint(
12412 dd->msix_entries[i].msix.vector,
12413 dd->msix_entries[i].mask);
12414 }
12415
12416out:
12417 free_cpumask_var(def);
12418 free_cpumask_var(rcv);
12419 return ret;
12420bail:
12421 ret = -ENOMEM;
12422 goto out;
12423}
12424
12425/*
12426 * Set the general handler to accept all interrupts, remap all
12427 * chip interrupts back to MSI-X 0.
12428 */
12429static void reset_interrupts(struct hfi1_devdata *dd)
12430{
12431 int i;
12432
12433 /* all interrupts handled by the general handler */
12434 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12435 dd->gi_mask[i] = ~(u64)0;
12436
12437 /* all chip interrupts map to MSI-X 0 */
12438 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12439 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12440}
12441
12442static int set_up_interrupts(struct hfi1_devdata *dd)
12443{
12444 struct hfi1_msix_entry *entries;
12445 u32 total, request;
12446 int i, ret;
12447 int single_interrupt = 0; /* we expect to have all the interrupts */
12448
12449 /*
12450 * Interrupt count:
12451 * 1 general, "slow path" interrupt (includes the SDMA engines
12452 * slow source, SDMACleanupDone)
12453 * N interrupts - one per used SDMA engine
12454	 * M interrupts - one per kernel receive context
12455 */
12456 total = 1 + dd->num_sdma + dd->n_krcv_queues;
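	/*
	 * For example (illustrative numbers only): with 16 SDMA engines in
	 * use and 8 kernel receive contexts, total = 1 + 16 + 8 = 25 MSI-X
	 * vectors are requested.
	 */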
12457
12458 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12459 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012460 ret = -ENOMEM;
12461 goto fail;
12462 }
12463 /* 1-1 MSI-X entry assignment */
12464 for (i = 0; i < total; i++)
12465 entries[i].msix.entry = i;
12466
12467 /* ask for MSI-X interrupts */
12468 request = total;
12469 request_msix(dd, &request, entries);
12470
12471 if (request == 0) {
12472 /* using INTx */
12473 /* dd->num_msix_entries already zero */
12474 kfree(entries);
12475 single_interrupt = 1;
12476 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12477 } else {
12478 /* using MSI-X */
12479 dd->num_msix_entries = request;
12480 dd->msix_entries = entries;
12481
12482 if (request != total) {
12483 /* using MSI-X, with reduced interrupts */
12484 dd_dev_err(
12485 dd,
12486 "cannot handle reduced interrupt case, want %u, got %u\n",
12487 total, request);
12488 ret = -EINVAL;
12489 goto fail;
12490 }
12491 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12492 }
12493
12494 /* mask all interrupts */
12495 set_intr_state(dd, 0);
12496 /* clear all pending interrupts */
12497 clear_all_interrupts(dd);
12498
12499 /* reset general handler mask, chip MSI-X mappings */
12500 reset_interrupts(dd);
12501
12502 if (single_interrupt)
12503 ret = request_intx_irq(dd);
12504 else
12505 ret = request_msix_irqs(dd);
12506 if (ret)
12507 goto fail;
12508
12509 return 0;
12510
12511fail:
12512 clean_up_interrupts(dd);
12513 return ret;
12514}
12515
12516/*
12517 * Set up context values in dd. Sets:
12518 *
12519 * num_rcv_contexts - number of contexts being used
12520 * n_krcv_queues - number of kernel contexts
12521 * first_user_ctxt - first non-kernel context in array of contexts
12522 * freectxts - number of free user contexts
12523 * num_send_contexts - number of PIO send contexts being used
12524 */
12525static int set_up_context_variables(struct hfi1_devdata *dd)
12526{
12527 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012528 int total_contexts;
12529 int ret;
12530 unsigned ngroups;
12531
12532 /*
12533 * Kernel contexts: (to be fixed later):
12534	 * - min of 2, or 1 context per NUMA node
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012535 * - Context 0 - control context (VL15/multicast/error)
12536 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012537 */
12538 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012539 /*
12540 * Don't count context 0 in n_krcvqs since
12541	 * it isn't used for normal verbs traffic.
12542 *
12543 * krcvqs will reflect number of kernel
12544 * receive contexts above 0.
12545 */
12546 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012547 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012548 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012549 num_kernel_contexts =
12550 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12551 /*
12552 * Every kernel receive context needs an ACK send context.
12553 * one send context is allocated for each VL{0-7} and VL15
12554 */
12555 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12556 dd_dev_err(dd,
12557 "Reducing # kernel rcv contexts to: %d, from %d\n",
12558 (int)(dd->chip_send_contexts - num_vls - 1),
12559 (int)num_kernel_contexts);
12560 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12561 }
12562 /*
12563 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012564 * - default to 1 user context per CPU if num_user_contexts is
12565 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012566 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012567 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012568 num_user_contexts = num_online_cpus();
12569
12570 total_contexts = num_kernel_contexts + num_user_contexts;
12571
12572 /*
12573 * Adjust the counts given a global max.
12574 */
12575 if (total_contexts > dd->chip_rcv_contexts) {
12576 dd_dev_err(dd,
12577 "Reducing # user receive contexts to: %d, from %d\n",
12578 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12579 (int)num_user_contexts);
12580 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12581 /* recalculate */
12582 total_contexts = num_kernel_contexts + num_user_contexts;
12583 }
12584
12585 /* the first N are kernel contexts, the rest are user contexts */
12586 dd->num_rcv_contexts = total_contexts;
12587 dd->n_krcv_queues = num_kernel_contexts;
12588 dd->first_user_ctxt = num_kernel_contexts;
12589 dd->freectxts = num_user_contexts;
12590 dd_dev_info(dd,
12591 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12592 (int)dd->chip_rcv_contexts,
12593 (int)dd->num_rcv_contexts,
12594 (int)dd->n_krcv_queues,
12595 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12596
12597 /*
12598 * Receive array allocation:
12599 * All RcvArray entries are divided into groups of 8. This
12600 * is required by the hardware and will speed up writes to
12601 * consecutive entries by using write-combining of the entire
12602 * cacheline.
12603 *
12604	 * The number of groups is evenly divided among all contexts.
12605	 * Any left-over groups will be given to the first N user
12606 * contexts.
12607 */
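	/*
	 * Worked example (hypothetical sizes): with 32768 RcvArray entries
	 * and a group size of 8 there are 4096 groups; with 40 receive
	 * contexts each context gets 102 groups and the 16 left-over groups
	 * (4096 - 40 * 102) go to the first 16 user contexts.
	 */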
12608 dd->rcv_entries.group_size = RCV_INCREMENT;
12609 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12610 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12611 dd->rcv_entries.nctxt_extra = ngroups -
12612 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12613 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12614 dd->rcv_entries.ngroups,
12615 dd->rcv_entries.nctxt_extra);
12616 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12617 MAX_EAGER_ENTRIES * 2) {
12618 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12619 dd->rcv_entries.group_size;
12620 dd_dev_info(dd,
12621 "RcvArray group count too high, change to %u\n",
12622 dd->rcv_entries.ngroups);
12623 dd->rcv_entries.nctxt_extra = 0;
12624 }
12625 /*
12626 * PIO send contexts
12627 */
12628 ret = init_sc_pools_and_sizes(dd);
12629 if (ret >= 0) { /* success */
12630 dd->num_send_contexts = ret;
12631 dd_dev_info(
12632 dd,
12633 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12634 dd->chip_send_contexts,
12635 dd->num_send_contexts,
12636 dd->sc_sizes[SC_KERNEL].count,
12637 dd->sc_sizes[SC_ACK].count,
12638 dd->sc_sizes[SC_USER].count);
12639 ret = 0; /* success */
12640 }
12641
12642 return ret;
12643}
12644
12645/*
12646 * Set the device/port partition key table. The MAD code
12647 * will ensure that, at least, the partial management
12648 * partition key is present in the table.
12649 */
12650static void set_partition_keys(struct hfi1_pportdata *ppd)
12651{
12652 struct hfi1_devdata *dd = ppd->dd;
12653 u64 reg = 0;
12654 int i;
12655
12656 dd_dev_info(dd, "Setting partition keys\n");
12657 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12658 reg |= (ppd->pkeys[i] &
12659 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12660 ((i % 4) *
12661 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12662 /* Each register holds 4 PKey values. */
12663 if ((i % 4) == 3) {
12664 write_csr(dd, RCV_PARTITION_KEY +
12665 ((i - 3) * 2), reg);
12666 reg = 0;
12667 }
12668 }
12669
12670 /* Always enable HW pkeys check when pkeys table is set */
12671 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12672}
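/*
 * Packing example: each 64-bit RCV_PARTITION_KEY register holds four
 * 16-bit pkeys, so pkeys[0..3] land in the register at offset 0 and
 * pkeys[4..7] in the register at offset 8, each key occupying the 16-bit
 * lane selected by (i % 4).
 */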
12673
12674/*
12675 * These CSRs and memories are uninitialized on reset and must be
12676 * written before reading to set the ECC/parity bits.
12677 *
12678 * NOTE: All user context CSRs that are not mmaped write-only
12679 * (e.g. the TID flows) must be initialized even if the driver never
12680 * reads them.
12681 */
12682static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12683{
12684 int i, j;
12685
12686 /* CceIntMap */
12687 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12688 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12689
12690 /* SendCtxtCreditReturnAddr */
12691 for (i = 0; i < dd->chip_send_contexts; i++)
12692 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12693
12694 /* PIO Send buffers */
12695 /* SDMA Send buffers */
12696 /* These are not normally read, and (presently) have no method
12697 to be read, so are not pre-initialized */
12698
12699 /* RcvHdrAddr */
12700 /* RcvHdrTailAddr */
12701 /* RcvTidFlowTable */
12702 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12703 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12704 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12705 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12706 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12707 }
12708
12709 /* RcvArray */
12710 for (i = 0; i < dd->chip_rcv_array_count; i++)
12711 write_csr(dd, RCV_ARRAY + (8*i),
12712 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12713
12714 /* RcvQPMapTable */
12715 for (i = 0; i < 32; i++)
12716 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12717}
12718
12719/*
12720 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12721 */
12722static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12723 u64 ctrl_bits)
12724{
12725 unsigned long timeout;
12726 u64 reg;
12727
12728 /* is the condition present? */
12729 reg = read_csr(dd, CCE_STATUS);
12730 if ((reg & status_bits) == 0)
12731 return;
12732
12733 /* clear the condition */
12734 write_csr(dd, CCE_CTRL, ctrl_bits);
12735
12736 /* wait for the condition to clear */
12737 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12738 while (1) {
12739 reg = read_csr(dd, CCE_STATUS);
12740 if ((reg & status_bits) == 0)
12741 return;
12742 if (time_after(jiffies, timeout)) {
12743 dd_dev_err(dd,
12744 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12745 status_bits, reg & status_bits);
12746 return;
12747 }
12748 udelay(1);
12749 }
12750}
12751
12752/* set CCE CSRs to chip reset defaults */
12753static void reset_cce_csrs(struct hfi1_devdata *dd)
12754{
12755 int i;
12756
12757 /* CCE_REVISION read-only */
12758 /* CCE_REVISION2 read-only */
12759 /* CCE_CTRL - bits clear automatically */
12760 /* CCE_STATUS read-only, use CceCtrl to clear */
12761 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12762 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12763 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12764 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12765 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12766 /* CCE_ERR_STATUS read-only */
12767 write_csr(dd, CCE_ERR_MASK, 0);
12768 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12769 /* CCE_ERR_FORCE leave alone */
12770 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12771 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12772 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12773 /* CCE_PCIE_CTRL leave alone */
12774 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12775 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12776 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12777 CCE_MSIX_TABLE_UPPER_RESETCSR);
12778 }
12779 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12780 /* CCE_MSIX_PBA read-only */
12781 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12782 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12783 }
12784 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12785 write_csr(dd, CCE_INT_MAP, 0);
12786 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12787 /* CCE_INT_STATUS read-only */
12788 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12789 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12790 /* CCE_INT_FORCE leave alone */
12791 /* CCE_INT_BLOCKED read-only */
12792 }
12793 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12794 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12795}
12796
12797/* set ASIC CSRs to chip reset defaults */
12798static void reset_asic_csrs(struct hfi1_devdata *dd)
12799{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012800 int i;
12801
12802 /*
12803 * If the HFIs are shared between separate nodes or VMs,
12804 * then more will need to be done here. One idea is a module
12805 * parameter that returns early, letting the first power-on or
12806 * a known first load do the reset and blocking all others.
12807 */
12808
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012809 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12810 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012811
12812 if (dd->icode != ICODE_FPGA_EMULATION) {
12813 /* emulation does not have an SBus - leave these alone */
12814 /*
12815 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12816 * Notes:
12817 * o The reset is not zero if aimed at the core. See the
12818 * SBus documentation for details.
12819 * o If the SBus firmware has been updated (e.g. by the BIOS),
12820 * will the reset revert that?
12821 */
12822 /* ASIC_CFG_SBUS_REQUEST leave alone */
12823 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12824 }
12825 /* ASIC_SBUS_RESULT read-only */
12826 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12827 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12828 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12829 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012830
12831 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012832 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012833
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012834 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012835 /* ASIC_STS_THERM read-only */
12836 /* ASIC_CFG_RESET leave alone */
12837
12838 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12839 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12840 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12841 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12842 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12843 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12844 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12845 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12846 for (i = 0; i < 16; i++)
12847 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12848
12849 /* ASIC_GPIO_IN read-only */
12850 write_csr(dd, ASIC_GPIO_OE, 0);
12851 write_csr(dd, ASIC_GPIO_INVERT, 0);
12852 write_csr(dd, ASIC_GPIO_OUT, 0);
12853 write_csr(dd, ASIC_GPIO_MASK, 0);
12854 /* ASIC_GPIO_STATUS read-only */
12855 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12856 /* ASIC_GPIO_FORCE leave alone */
12857
12858 /* ASIC_QSFP1_IN read-only */
12859 write_csr(dd, ASIC_QSFP1_OE, 0);
12860 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12861 write_csr(dd, ASIC_QSFP1_OUT, 0);
12862 write_csr(dd, ASIC_QSFP1_MASK, 0);
12863 /* ASIC_QSFP1_STATUS read-only */
12864 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12865 /* ASIC_QSFP1_FORCE leave alone */
12866
12867 /* ASIC_QSFP2_IN read-only */
12868 write_csr(dd, ASIC_QSFP2_OE, 0);
12869 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12870 write_csr(dd, ASIC_QSFP2_OUT, 0);
12871 write_csr(dd, ASIC_QSFP2_MASK, 0);
12872 /* ASIC_QSFP2_STATUS read-only */
12873 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12874 /* ASIC_QSFP2_FORCE leave alone */
12875
12876 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12877 /* this also writes a NOP command, clearing paging mode */
12878 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12879 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012880}
12881
12882/* set MISC CSRs to chip reset defaults */
12883static void reset_misc_csrs(struct hfi1_devdata *dd)
12884{
12885 int i;
12886
12887 for (i = 0; i < 32; i++) {
12888 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12889 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12890 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12891 }
12892 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12893	   only be written in 128-byte chunks */
12894 /* init RSA engine to clear lingering errors */
12895 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12896 write_csr(dd, MISC_CFG_RSA_MU, 0);
12897 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12898 /* MISC_STS_8051_DIGEST read-only */
12899 /* MISC_STS_SBM_DIGEST read-only */
12900 /* MISC_STS_PCIE_DIGEST read-only */
12901 /* MISC_STS_FAB_DIGEST read-only */
12902 /* MISC_ERR_STATUS read-only */
12903 write_csr(dd, MISC_ERR_MASK, 0);
12904 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12905 /* MISC_ERR_FORCE leave alone */
12906}
12907
12908/* set TXE CSRs to chip reset defaults */
12909static void reset_txe_csrs(struct hfi1_devdata *dd)
12910{
12911 int i;
12912
12913 /*
12914 * TXE Kernel CSRs
12915 */
12916 write_csr(dd, SEND_CTRL, 0);
12917 __cm_reset(dd, 0); /* reset CM internal state */
12918 /* SEND_CONTEXTS read-only */
12919 /* SEND_DMA_ENGINES read-only */
12920 /* SEND_PIO_MEM_SIZE read-only */
12921 /* SEND_DMA_MEM_SIZE read-only */
12922 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12923 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12924 /* SEND_PIO_ERR_STATUS read-only */
12925 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12926 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12927 /* SEND_PIO_ERR_FORCE leave alone */
12928 /* SEND_DMA_ERR_STATUS read-only */
12929 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12930 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12931 /* SEND_DMA_ERR_FORCE leave alone */
12932 /* SEND_EGRESS_ERR_STATUS read-only */
12933 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12934 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12935 /* SEND_EGRESS_ERR_FORCE leave alone */
12936 write_csr(dd, SEND_BTH_QP, 0);
12937 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12938 write_csr(dd, SEND_SC2VLT0, 0);
12939 write_csr(dd, SEND_SC2VLT1, 0);
12940 write_csr(dd, SEND_SC2VLT2, 0);
12941 write_csr(dd, SEND_SC2VLT3, 0);
12942 write_csr(dd, SEND_LEN_CHECK0, 0);
12943 write_csr(dd, SEND_LEN_CHECK1, 0);
12944 /* SEND_ERR_STATUS read-only */
12945 write_csr(dd, SEND_ERR_MASK, 0);
12946 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12947 /* SEND_ERR_FORCE read-only */
12948 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12949 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12950 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12951 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12952 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12953 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12954 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12955 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12956 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12957 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12958 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12959 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12960 SEND_CM_GLOBAL_CREDIT_RESETCSR);
12961 /* SEND_CM_CREDIT_USED_STATUS read-only */
12962 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12963 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12964 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12965 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12966 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12967 for (i = 0; i < TXE_NUM_DATA_VL; i++)
12968 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12969 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12970 /* SEND_CM_CREDIT_USED_VL read-only */
12971 /* SEND_CM_CREDIT_USED_VL15 read-only */
12972 /* SEND_EGRESS_CTXT_STATUS read-only */
12973 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12974 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12975 /* SEND_EGRESS_ERR_INFO read-only */
12976 /* SEND_EGRESS_ERR_SOURCE read-only */
12977
12978 /*
12979 * TXE Per-Context CSRs
12980 */
12981 for (i = 0; i < dd->chip_send_contexts; i++) {
12982 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12983 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
12984 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12985 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
12986 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
12987 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
12988 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
12989 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
12990 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
12991 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
12992 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
12993 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
12994 }
12995
12996 /*
12997 * TXE Per-SDMA CSRs
12998 */
12999 for (i = 0; i < dd->chip_sdma_engines; i++) {
13000 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13001 /* SEND_DMA_STATUS read-only */
13002 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13003 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13004 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13005 /* SEND_DMA_HEAD read-only */
13006 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13007 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13008 /* SEND_DMA_IDLE_CNT read-only */
13009 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13010 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13011 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13012 /* SEND_DMA_ENG_ERR_STATUS read-only */
13013 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13014 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13015 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13016 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13017 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13018 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13019 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13020 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13021 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13022 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13023 }
13024}
13025
13026/*
13027 * Expect on entry:
13028 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13029 */
13030static void init_rbufs(struct hfi1_devdata *dd)
13031{
13032 u64 reg;
13033 int count;
13034
13035 /*
13036 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13037 * clear.
13038 */
13039 count = 0;
13040 while (1) {
13041 reg = read_csr(dd, RCV_STATUS);
13042 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13043 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13044 break;
13045 /*
13046 * Give up after 1ms - maximum wait time.
13047 *
13048 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13049 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13050 * 148 KB / (66% * 250MB/s) = 920us
13051 */
13052 if (count++ > 500) {
13053 dd_dev_err(dd,
13054 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13055 __func__, reg);
13056 break;
13057 }
13058 udelay(2); /* do not busy-wait the CSR */
13059 }
13060
13061 /* start the init - expect RcvCtrl to be 0 */
13062 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13063
13064 /*
13065 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
13066 * period after the write before RcvStatus.RxRbufInitDone is valid.
13067 * The delay in the first run through the loop below is sufficient and
13068	 * required before the first read of RcvStatus.RxRbufInitDone.
13069 */
13070 read_csr(dd, RCV_CTRL);
13071
13072 /* wait for the init to finish */
13073 count = 0;
13074 while (1) {
13075 /* delay is required first time through - see above */
13076 udelay(2); /* do not busy-wait the CSR */
13077 reg = read_csr(dd, RCV_STATUS);
13078 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13079 break;
13080
13081 /* give up after 100us - slowest possible at 33MHz is 73us */
13082 if (count++ > 50) {
13083 dd_dev_err(dd,
13084 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13085 __func__);
13086 break;
13087 }
13088 }
13089}
13090
13091/* set RXE CSRs to chip reset defaults */
13092static void reset_rxe_csrs(struct hfi1_devdata *dd)
13093{
13094 int i, j;
13095
13096 /*
13097 * RXE Kernel CSRs
13098 */
13099 write_csr(dd, RCV_CTRL, 0);
13100 init_rbufs(dd);
13101 /* RCV_STATUS read-only */
13102 /* RCV_CONTEXTS read-only */
13103 /* RCV_ARRAY_CNT read-only */
13104 /* RCV_BUF_SIZE read-only */
13105 write_csr(dd, RCV_BTH_QP, 0);
13106 write_csr(dd, RCV_MULTICAST, 0);
13107 write_csr(dd, RCV_BYPASS, 0);
13108 write_csr(dd, RCV_VL15, 0);
13109 /* this is a clear-down */
13110 write_csr(dd, RCV_ERR_INFO,
13111 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13112 /* RCV_ERR_STATUS read-only */
13113 write_csr(dd, RCV_ERR_MASK, 0);
13114 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13115 /* RCV_ERR_FORCE leave alone */
13116 for (i = 0; i < 32; i++)
13117 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13118 for (i = 0; i < 4; i++)
13119 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13120 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13121 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13122 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13123 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13124 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13125 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13126 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13127 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13128 }
13129 for (i = 0; i < 32; i++)
13130 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13131
13132 /*
13133 * RXE Kernel and User Per-Context CSRs
13134 */
13135 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13136 /* kernel */
13137 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13138 /* RCV_CTXT_STATUS read-only */
13139 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13140 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13141 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13142 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13143 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13144 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13145 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13146 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13147 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13148 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13149
13150 /* user */
13151 /* RCV_HDR_TAIL read-only */
13152 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13153 /* RCV_EGR_INDEX_TAIL read-only */
13154 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13155 /* RCV_EGR_OFFSET_TAIL read-only */
13156 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13157 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13158 0);
13159 }
13160 }
13161}
13162
13163/*
13164 * Set sc2vl tables.
13165 *
13166 * They power on to zeros, so to avoid send context errors
13167 * they need to be set:
13168 *
13169 * SC 0-7 -> VL 0-7 (respectively)
13170 * SC 15 -> VL 15
13171 * otherwise
13172 * -> VL 0
13173 */
13174static void init_sc2vl_tables(struct hfi1_devdata *dd)
13175{
13176 int i;
13177 /* init per architecture spec, constrained by hardware capability */
13178
13179 /* HFI maps sent packets */
13180 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13181 0,
13182 0, 0, 1, 1,
13183 2, 2, 3, 3,
13184 4, 4, 5, 5,
13185 6, 6, 7, 7));
13186 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13187 1,
13188 8, 0, 9, 0,
13189 10, 0, 11, 0,
13190 12, 0, 13, 0,
13191 14, 0, 15, 15));
13192 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13193 2,
13194 16, 0, 17, 0,
13195 18, 0, 19, 0,
13196 20, 0, 21, 0,
13197 22, 0, 23, 0));
13198 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13199 3,
13200 24, 0, 25, 0,
13201 26, 0, 27, 0,
13202 28, 0, 29, 0,
13203 30, 0, 31, 0));
13204
13205 /* DC maps received packets */
13206 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13207 15_0,
13208 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13209 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13210 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13211 31_16,
13212 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13213 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13214
13215 /* initialize the cached sc2vl values consistently with h/w */
13216 for (i = 0; i < 32; i++) {
13217 if (i < 8 || i == 15)
13218 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13219 else
13220 *((u8 *)(dd->sc2vl) + i) = 0;
13221 }
13222}
13223
13224/*
13225 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13226 * depend on the chip going through a power-on reset - a driver may be loaded
13227 * and unloaded many times.
13228 *
13229 * Do not write any CSR values to the chip in this routine - there may be
13230 * a reset following the (possible) FLR in this routine.
13231 *
13232 */
13233static void init_chip(struct hfi1_devdata *dd)
13234{
13235 int i;
13236
13237 /*
13238 * Put the HFI CSRs in a known state.
13239 * Combine this with a DC reset.
13240 *
13241 * Stop the device from doing anything while we do a
13242 * reset. We know there are no other active users of
13243 * the device since we are now in charge. Turn off
13244	 * all outbound and inbound traffic and make sure
13245 * the device does not generate any interrupts.
13246 */
13247
13248 /* disable send contexts and SDMA engines */
13249 write_csr(dd, SEND_CTRL, 0);
13250 for (i = 0; i < dd->chip_send_contexts; i++)
13251 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13252 for (i = 0; i < dd->chip_sdma_engines; i++)
13253 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13254 /* disable port (turn off RXE inbound traffic) and contexts */
13255 write_csr(dd, RCV_CTRL, 0);
13256 for (i = 0; i < dd->chip_rcv_contexts; i++)
13257 write_csr(dd, RCV_CTXT_CTRL, 0);
13258 /* mask all interrupt sources */
13259 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13260 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13261
13262 /*
13263 * DC Reset: do a full DC reset before the register clear.
13264 * A recommended length of time to hold is one CSR read,
13265 * so reread the CceDcCtrl. Then, hold the DC in reset
13266 * across the clear.
13267 */
13268 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13269 (void) read_csr(dd, CCE_DC_CTRL);
13270
13271 if (use_flr) {
13272 /*
13273 * A FLR will reset the SPC core and part of the PCIe.
13274 * The parts that need to be restored have already been
13275 * saved.
13276 */
13277 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13278
13279 /* do the FLR, the DC reset will remain */
13280 hfi1_pcie_flr(dd);
13281
13282 /* restore command and BARs */
13283 restore_pci_variables(dd);
13284
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013285 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013286 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13287 hfi1_pcie_flr(dd);
13288 restore_pci_variables(dd);
13289 }
13290
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013291 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013292 } else {
13293 dd_dev_info(dd, "Resetting CSRs with writes\n");
13294 reset_cce_csrs(dd);
13295 reset_txe_csrs(dd);
13296 reset_rxe_csrs(dd);
13297 reset_asic_csrs(dd);
13298 reset_misc_csrs(dd);
13299 }
13300 /* clear the DC reset */
13301 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013302
Mike Marciniszyn77241052015-07-30 15:17:43 -040013303 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013304 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013305 setextled(dd, 0);
13306 /*
13307 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013308 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013309 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013310 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013311 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013312 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013313 * I2CCLK and I2CDAT will change per direction, and INT_N and
13314 * MODPRS_N are input only and their value is ignored.
13315 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013316 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13317 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013318}
13319
13320static void init_early_variables(struct hfi1_devdata *dd)
13321{
13322 int i;
13323
13324 /* assign link credit variables */
13325 dd->vau = CM_VAU;
13326 dd->link_credits = CM_GLOBAL_CREDITS;
13327 if (is_ax(dd))
13328 dd->link_credits--;
13329 dd->vcu = cu_to_vcu(hfi1_cu);
13330 /* enough room for 8 MAD packets plus header - 17K */
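 /*
  * Illustrative arithmetic (assuming CM_VAU selects 64-byte allocation
  * units): 8 * (2048 + 128) = 17408 bytes, so vl15_init works out to
  * roughly 17408 / 64 = 272 AUs before the clamp to link_credits below.
  */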
13331 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13332 if (dd->vl15_init > dd->link_credits)
13333 dd->vl15_init = dd->link_credits;
13334
13335 write_uninitialized_csrs_and_memories(dd);
13336
13337 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13338 for (i = 0; i < dd->num_pports; i++) {
13339 struct hfi1_pportdata *ppd = &dd->pport[i];
13340
13341 set_partition_keys(ppd);
13342 }
13343 init_sc2vl_tables(dd);
13344}
13345
13346static void init_kdeth_qp(struct hfi1_devdata *dd)
13347{
13348 /* user changed the KDETH_QP */
13349 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13350 /* out of range or illegal value */
13351 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13352 kdeth_qp = 0;
13353 }
13354 if (kdeth_qp == 0) /* not set, or failed range check */
13355 kdeth_qp = DEFAULT_KDETH_QP;
13356
13357 write_csr(dd, SEND_BTH_QP,
13358 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13359 << SEND_BTH_QP_KDETH_QP_SHIFT);
13360
13361 write_csr(dd, RCV_BTH_QP,
13362 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13363 << RCV_BTH_QP_KDETH_QP_SHIFT);
13364}
13365
13366/**
13367 * init_qpmap_table
13368 * @dd - device data
13369 * @first_ctxt - first context
13370 * @last_ctxt - last context
13371 *
13372 * This routine sets the qpn mapping table that
13373 * is indexed by qpn[8:1].
13374 *
13375 * The routine will round robin the 256 settings
13376 * from first_ctxt to last_ctxt.
13377 *
13378 * The first/last looks ahead to having specialized
13379 * receive contexts for mgmt and bypass. Normal
13380 * verbs traffic will be assumed to be on a range
13381 * of receive contexts.
13382 */
13383static void init_qpmap_table(struct hfi1_devdata *dd,
13384 u32 first_ctxt,
13385 u32 last_ctxt)
13386{
13387 u64 reg = 0;
13388 u64 regno = RCV_QP_MAP_TABLE;
13389 int i;
13390 u64 ctxt = first_ctxt;
13391
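 /*
  * Illustrative packing (a sketch, not from the hardware spec): each
  * 64-bit RCV_QP_MAP_TABLE register holds eight 8-bit entries, so
  * entry i lands in byte (i % 8) of register RCV_QP_MAP_TABLE + 8 * (i / 8).
  * With first_ctxt = 8 and last_ctxt = 10 the 256 entries cycle
  * 8, 9, 10, 8, 9, 10, ... and qpn[8:1] selects an entry on receive.
  */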
13392 for (i = 0; i < 256;) {
13393 reg |= ctxt << (8 * (i % 8));
13394 i++;
13395 ctxt++;
13396 if (ctxt > last_ctxt)
13397 ctxt = first_ctxt;
13398 if (i % 8 == 0) {
13399 write_csr(dd, regno, reg);
13400 reg = 0;
13401 regno += 8;
13402 }
13403 }
13404 if (i % 8)
13405 write_csr(dd, regno, reg);
13406
13407 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13408 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13409}
13410
13411/**
13412 * init_qos - init RX qos
13413 * @dd - device data
13414 * @first_ctxt - first context to use for the QOS mapping
13415 *
13416 * This routine initializes Rule 0 and the
13417 * RSM map table to implement qos.
13418 *
13419 * If all of the limit tests succeed,
13420 * qos is applied based on the array
13421 * interpretation of krcvqs where
13422 * entry 0 is VL0.
13423 *
13424 * The number of vl bits (n) and the number of qpn
13425 * bits (m) are computed to feed both the RSM map table
13426 * and the single rule.
13427 *
13428 */
13429static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13430{
13431 u8 max_by_vl = 0;
13432 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13433 u64 *rsmmap;
13434 u64 reg;
13435 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13436
13437 /* validate */
13438 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13439 num_vls == 1 ||
13440 krcvqsset <= 1)
13441 goto bail;
13442 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13443 if (krcvqs[i] > max_by_vl)
13444 max_by_vl = krcvqs[i];
13445 if (max_by_vl > 32)
13446 goto bail;
13447 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13448 /* determine bits vl */
13449 n = ilog2(num_vls);
13450 /* determine bits for qpn */
13451 m = ilog2(qpns_per_vl);
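 /*
  * Worked example (illustrative values, not a recommended config):
  * num_vls = 8 and krcvqs[] = {2,2,2,2,2,2,2,2} gives max_by_vl = 2,
  * qpns_per_vl = 2, n = 3 and m = 1. The RSM rule below then extracts
  * m + n = 4 QPN bits, and the map table spreads them over the
  * 8 * 2 = 16 receive contexts starting at first_ctxt.
  */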
13452 if ((m + n) > 7)
13453 goto bail;
13454 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13455 goto bail;
13456 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13457 if (!rsmmap)
13458 goto bail;
13459 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13460 /* init the local copy of the table */
13461 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13462 unsigned tctxt;
13463
13464 for (qpn = 0, tctxt = ctxt;
13465 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13466 unsigned idx, regoff, regidx;
13467
13468 /* generate index <= 128 */
13469 idx = (qpn << n) ^ i;
13470 regoff = (idx % 8) * 8;
13471 regidx = idx / 8;
13472 reg = rsmmap[regidx];
13473 /* replace 0xff with context number */
13474 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13475 << regoff);
13476 reg |= (u64)(tctxt++) << regoff;
13477 rsmmap[regidx] = reg;
13478 if (tctxt == ctxt + krcvqs[i])
13479 tctxt = ctxt;
13480 }
13481 ctxt += krcvqs[i];
13482 }
13483 /* flush cached copies to chip */
13484 for (i = 0; i < NUM_MAP_REGS; i++)
13485 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13486 /* add rule0 */
13487 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13488 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13489 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13490 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13491 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13492 LRH_BTH_MATCH_OFFSET
13493 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13494 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13495 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13496 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13497 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13498 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13499 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13500 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13501 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13502 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13503 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13504 /* Enable RSM */
13505 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13506 kfree(rsmmap);
13507 /* map everything else to first context */
13508 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13509 dd->qos_shift = n + 1;
13510 return;
13511bail:
13512 dd->qos_shift = 1;
13513 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13514}
13515
13516static void init_rxe(struct hfi1_devdata *dd)
13517{
13518 /* enable all receive errors */
13519 write_csr(dd, RCV_ERR_MASK, ~0ull);
13520 /* setup QPN map table - start where VL15 context leaves off */
13521 init_qos(
13522 dd,
13523 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13524 /*
13525 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13526 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13527 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13528 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13529 * Max_PayLoad_Size set to its minimum of 128.
13530 *
13531 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13532 * (64 bytes). Max_Payload_Size is possibly modified upward in
13533 * tune_pcie_caps() which is called after this routine.
13534 */
13535}
13536
13537static void init_other(struct hfi1_devdata *dd)
13538{
13539 /* enable all CCE errors */
13540 write_csr(dd, CCE_ERR_MASK, ~0ull);
13541 /* enable *some* Misc errors */
13542 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13543 /* enable all DC errors, except LCB */
13544 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13545 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13546}
13547
13548/*
13549 * Fill out the given AU table using the given CU. A CU is defined in terms
13550 * of AUs. The table is an encoding: given the index, how many AUs does that
13551 * represent?
13552 *
13553 * NOTE: Assumes that the register layout is the same for the
13554 * local and remote tables.
13555 */
13556static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13557 u32 csr0to3, u32 csr4to7)
13558{
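 /*
  * Sketch of the resulting encoding (assuming cu = vcu_to_cu(vcu) = 1):
  * table indices 0..7 map to 0, 1, 2, 4, 8, 16, 32 and 64 AUs. A larger
  * CU simply scales entries 2..7 by the cu value used below.
  */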
13559 write_csr(dd, csr0to3,
13560 0ull <<
13561 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13562 | 1ull <<
13563 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13564 | 2ull * cu <<
13565 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13566 | 4ull * cu <<
13567 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13568 write_csr(dd, csr4to7,
13569 8ull * cu <<
13570 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13571 | 16ull * cu <<
13572 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13573 | 32ull * cu <<
13574 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13575 | 64ull * cu <<
13576 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13577
13578}
13579
13580static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13581{
13582 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13583 SEND_CM_LOCAL_AU_TABLE4_TO7);
13584}
13585
13586void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13587{
13588 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13589 SEND_CM_REMOTE_AU_TABLE4_TO7);
13590}
13591
13592static void init_txe(struct hfi1_devdata *dd)
13593{
13594 int i;
13595
13596 /* enable all PIO, SDMA, general, and Egress errors */
13597 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13598 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13599 write_csr(dd, SEND_ERR_MASK, ~0ull);
13600 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13601
13602 /* enable all per-context and per-SDMA engine errors */
13603 for (i = 0; i < dd->chip_send_contexts; i++)
13604 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13605 for (i = 0; i < dd->chip_sdma_engines; i++)
13606 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13607
13608 /* set the local CU to AU mapping */
13609 assign_local_cm_au_table(dd, dd->vcu);
13610
13611 /*
13612 * Set reasonable default for Credit Return Timer
13613 * Don't set on Simulator - causes it to choke.
13614 */
13615 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13616 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13617}
13618
13619int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13620{
13621 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13622 unsigned sctxt;
13623 int ret = 0;
13624 u64 reg;
13625
13626 if (!rcd || !rcd->sc) {
13627 ret = -EINVAL;
13628 goto done;
13629 }
13630 sctxt = rcd->sc->hw_context;
13631 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13632 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13633 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13634 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13635 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13636 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13637 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13638 /*
13639 * Enable send-side J_KEY integrity check, unless this is A0 h/w
13640 */
13641 if (!is_ax(dd)) {
13642 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13643 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13644 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13645 }
13646
13647 /* Enable J_KEY check on receive context. */
13648 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13649 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13650 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13651 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13652done:
13653 return ret;
13654}
13655
13656int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13657{
13658 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13659 unsigned sctxt;
13660 int ret = 0;
13661 u64 reg;
13662
13663 if (!rcd || !rcd->sc) {
13664 ret = -EINVAL;
13665 goto done;
13666 }
13667 sctxt = rcd->sc->hw_context;
13668 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13669 /*
13670 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13671 * This check would not have been enabled for A0 h/w, see
13672 * set_ctxt_jkey().
13673 */
13674 if (!is_ax(dd)) {
13675 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13676 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13677 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13678 }
13679 /* Turn off the J_KEY on the receive side */
13680 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13681done:
13682 return ret;
13683}
13684
13685int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13686{
13687 struct hfi1_ctxtdata *rcd;
13688 unsigned sctxt;
13689 int ret = 0;
13690 u64 reg;
13691
13692 if (ctxt < dd->num_rcv_contexts)
13693 rcd = dd->rcd[ctxt];
13694 else {
13695 ret = -EINVAL;
13696 goto done;
13697 }
13698 if (!rcd || !rcd->sc) {
13699 ret = -EINVAL;
13700 goto done;
13701 }
13702 sctxt = rcd->sc->hw_context;
13703 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13704 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13705 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13706 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13707 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13708 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13709done:
13710 return ret;
13711}
13712
13713int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13714{
13715 struct hfi1_ctxtdata *rcd;
13716 unsigned sctxt;
13717 int ret = 0;
13718 u64 reg;
13719
13720 if (ctxt < dd->num_rcv_contexts)
13721 rcd = dd->rcd[ctxt];
13722 else {
13723 ret = -EINVAL;
13724 goto done;
13725 }
13726 if (!rcd || !rcd->sc) {
13727 ret = -EINVAL;
13728 goto done;
13729 }
13730 sctxt = rcd->sc->hw_context;
13731 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13732 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13733 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13734 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13735done:
13736 return ret;
13737}
13738
13739/*
13740 * Start doing the clean up of the chip. Our clean up happens in multiple
13741 * stages and this is just the first.
13742 */
13743void hfi1_start_cleanup(struct hfi1_devdata *dd)
13744{
13745 free_cntrs(dd);
13746 free_rcverr(dd);
13747 clean_up_interrupts(dd);
13748}
13749
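/*
 * The two HFIs that share one ASIC have GUIDs that (as this code assumes)
 * differ only in the bit at GUID_HFI_INDEX_SHIFT; masking that bit off
 * yields a per-ASIC value that asic_should_init() uses to find its peer.
 */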
13750#define HFI_BASE_GUID(dev) \
13751 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13752
13753/*
13754 * Certain chip functions need to be initialized only once per asic
13755 * instead of per-device. This function finds the peer device and
13756 * checks whether that chip initialization needs to be done by this
13757 * device.
13758 */
13759static void asic_should_init(struct hfi1_devdata *dd)
13760{
13761 unsigned long flags;
13762 struct hfi1_devdata *tmp, *peer = NULL;
13763
13764 spin_lock_irqsave(&hfi1_devs_lock, flags);
13765 /* Find our peer device */
13766 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13767 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13768 dd->unit != tmp->unit) {
13769 peer = tmp;
13770 break;
13771 }
13772 }
13773
13774 /*
13775 * "Claim" the ASIC for initialization if it hasn't been
13776 " "claimed" yet.
13777 */
13778 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13779 dd->flags |= HFI1_DO_INIT_ASIC;
13780 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13781}
13782
13783/*
13784 * Set dd->boardname. Use a generic name if a name is not returned from
13785 * EFI variable space.
13786 *
13787 * Return 0 on success, -ENOMEM if space could not be allocated.
13788 */
13789static int obtain_boardname(struct hfi1_devdata *dd)
13790{
13791 /* generic board description */
13792 const char generic[] =
13793 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13794 unsigned long size;
13795 int ret;
13796
13797 ret = read_hfi1_efi_var(dd, "description", &size,
13798 (void **)&dd->boardname);
13799 if (ret) {
13800 dd_dev_err(dd, "Board description not found\n");
13801 /* use generic description */
13802 dd->boardname = kstrdup(generic, GFP_KERNEL);
13803 if (!dd->boardname)
13804 return -ENOMEM;
13805 }
13806 return 0;
13807}
13808
13809/**
13810 * Allocate and initialize the device structure for the hfi.
13811 * @pdev: the pci_dev for hfi1_ib device
13812 * @ent: pci_device_id struct for this dev
13813 *
13814 * Also allocates, initializes, and returns the devdata struct for this
13815 * device instance
13816 *
13817 * This is global, and is called directly at init to set up the
13818 * chip-specific function pointers for later use.
13819 */
13820struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13821 const struct pci_device_id *ent)
13822{
13823 struct hfi1_devdata *dd;
13824 struct hfi1_pportdata *ppd;
13825 u64 reg;
13826 int i, ret;
13827 static const char * const inames[] = { /* implementation names */
13828 "RTL silicon",
13829 "RTL VCS simulation",
13830 "RTL FPGA emulation",
13831 "Functional simulator"
13832 };
13833
13834 dd = hfi1_alloc_devdata(pdev,
13835 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13836 if (IS_ERR(dd))
13837 goto bail;
13838 ppd = dd->pport;
13839 for (i = 0; i < dd->num_pports; i++, ppd++) {
13840 int vl;
13841 /* init common fields */
13842 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13843 /* DC supports 4 link widths */
13844 ppd->link_width_supported =
13845 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13846 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13847 ppd->link_width_downgrade_supported =
13848 ppd->link_width_supported;
13849 /* start out enabling only 4X */
13850 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13851 ppd->link_width_downgrade_enabled =
13852 ppd->link_width_downgrade_supported;
13853 /* link width active is 0 when link is down */
13854 /* link width downgrade active is 0 when link is down */
13855
13856 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13857 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13858 hfi1_early_err(&pdev->dev,
13859 "Invalid num_vls %u, using %u VLs\n",
13860 num_vls, HFI1_MAX_VLS_SUPPORTED);
13861 num_vls = HFI1_MAX_VLS_SUPPORTED;
13862 }
13863 ppd->vls_supported = num_vls;
13864 ppd->vls_operational = ppd->vls_supported;
13865 /* Set the default MTU. */
13866 for (vl = 0; vl < num_vls; vl++)
13867 dd->vld[vl].mtu = hfi1_max_mtu;
13868 dd->vld[15].mtu = MAX_MAD_PACKET;
13869 /*
13870 * Set the initial values to reasonable default, will be set
13871 * for real when link is up.
13872 */
13873 ppd->lstate = IB_PORT_DOWN;
13874 ppd->overrun_threshold = 0x4;
13875 ppd->phy_error_threshold = 0xf;
13876 ppd->port_crc_mode_enabled = link_crc_mask;
13877 /* initialize supported LTP CRC mode */
13878 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13879 /* initialize enabled LTP CRC mode */
13880 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13881 /* start in offline */
13882 ppd->host_link_state = HLS_DN_OFFLINE;
13883 init_vl_arb_caches(ppd);
13884 }
13885
13886 dd->link_default = HLS_DN_POLL;
13887
13888 /*
13889 * Do remaining PCIe setup and save PCIe values in dd.
13890 * Any error printing is already done by the init code.
13891 * On return, we have the chip mapped.
13892 */
13893 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13894 if (ret < 0)
13895 goto bail_free;
13896
13897 /* verify that reads actually work, save revision for reset check */
13898 dd->revision = read_csr(dd, CCE_REVISION);
13899 if (dd->revision == ~(u64)0) {
13900 dd_dev_err(dd, "cannot read chip CSRs\n");
13901 ret = -EINVAL;
13902 goto bail_cleanup;
13903 }
13904 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13905 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13906 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13907 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13908
13909 /* obtain the hardware ID - NOT related to unit, which is a
13910 software enumeration */
13911 reg = read_csr(dd, CCE_REVISION2);
13912 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13913 & CCE_REVISION2_HFI_ID_MASK;
13914 /* the variable size will remove unwanted bits */
13915 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13916 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13917 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13918 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13919 (int)dd->irev);
13920
13921 /* speeds the hardware can support */
13922 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13923 /* speeds allowed to run at */
13924 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13925 /* give a reasonable active value, will be set on link up */
13926 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13927
13928 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13929 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13930 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13931 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13932 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13933 /* fix up link widths for emulation _p */
13934 ppd = dd->pport;
13935 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13936 ppd->link_width_supported =
13937 ppd->link_width_enabled =
13938 ppd->link_width_downgrade_supported =
13939 ppd->link_width_downgrade_enabled =
13940 OPA_LINK_WIDTH_1X;
13941 }
13942 /* ensure num_vls isn't larger than number of sdma engines */
13943 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13944 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
13945 num_vls, dd->chip_sdma_engines);
13946 num_vls = dd->chip_sdma_engines;
13947 ppd->vls_supported = dd->chip_sdma_engines;
13948 }
13949
13950 /*
13951 * Convert the ns parameter to the 64 * cclocks used in the CSR.
13952 * Limit the max if larger than the field holds. If timeout is
13953 * non-zero, then the calculated field will be at least 1.
13954 *
13955 * Must be after icode is set up - the cclock rate depends
13956 * on knowing the hardware being used.
13957 */
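 /*
  * Illustrative only (the real cclock period depends on the part):
  * with the default rcv_intr_timeout of 840ns and a hypothetical 8ns
  * cclock, ns_to_cclock() gives 105 cclocks and the CSR field becomes
  * 105 / 64 = 1 after the clamping below.
  */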
13958 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13959 if (dd->rcv_intr_timeout_csr >
13960 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13961 dd->rcv_intr_timeout_csr =
13962 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13963 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13964 dd->rcv_intr_timeout_csr = 1;
13965
13966 /* needs to be done before we look for the peer device */
13967 read_guid(dd);
13968
13969 /* should this device init the ASIC block? */
13970 asic_should_init(dd);
13971
13972 /* obtain chip sizes, reset chip CSRs */
13973 init_chip(dd);
13974
13975 /* read in the PCIe link speed information */
13976 ret = pcie_speeds(dd);
13977 if (ret)
13978 goto bail_cleanup;
13979
13980 /* read in firmware */
13981 ret = hfi1_firmware_init(dd);
13982 if (ret)
13983 goto bail_cleanup;
13984
13985 /*
13986 * In general, the PCIe Gen3 transition must occur after the
13987 * chip has been idled (so it won't initiate any PCIe transactions
13988 * e.g. an interrupt) and before the driver changes any registers
13989 * (the transition will reset the registers).
13990 *
13991 * In particular, place this call after:
13992 * - init_chip() - the chip will not initiate any PCIe transactions
13993 * - pcie_speeds() - reads the current link speed
13994 * - hfi1_firmware_init() - the needed firmware is ready to be
13995 * downloaded
13996 */
13997 ret = do_pcie_gen3_transition(dd);
13998 if (ret)
13999 goto bail_cleanup;
14000
14001 /* start setting dd values and adjusting CSRs */
14002 init_early_variables(dd);
14003
14004 parse_platform_config(dd);
14005
14006 ret = obtain_boardname(dd);
14007 if (ret)
14008 goto bail_cleanup;
14009
14010 snprintf(dd->boardversion, BOARD_VERS_MAX,
14011 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14012 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14013 (u32)dd->majrev,
14014 (u32)dd->minrev,
14015 (dd->revision >> CCE_REVISION_SW_SHIFT)
14016 & CCE_REVISION_SW_MASK);
14017
14018 ret = set_up_context_variables(dd);
14019 if (ret)
14020 goto bail_cleanup;
14021
14022 /* set initial RXE CSRs */
14023 init_rxe(dd);
14024 /* set initial TXE CSRs */
14025 init_txe(dd);
14026 /* set initial non-RXE, non-TXE CSRs */
14027 init_other(dd);
14028 /* set up KDETH QP prefix in both RX and TX CSRs */
14029 init_kdeth_qp(dd);
14030
14031 /* send contexts must be set up before receive contexts */
14032 ret = init_send_contexts(dd);
14033 if (ret)
14034 goto bail_cleanup;
14035
14036 ret = hfi1_create_ctxts(dd);
14037 if (ret)
14038 goto bail_cleanup;
14039
14040 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14041 /*
14042 * rcd[0] is guaranteed to be valid by this point. Also, all
14043 * context are using the same value, as per the module parameter.
14044 */
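 /*
  * The RHF is assumed here to occupy the last 8 bytes (2 dwords) of
  * each rcvhdrq entry, hence the offset is the entry size minus
  * sizeof(u64) / sizeof(u32) = 2 dwords.
  */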
14045 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14046
14047 ret = init_pervl_scs(dd);
14048 if (ret)
14049 goto bail_cleanup;
14050
14051 /* sdma init */
14052 for (i = 0; i < dd->num_pports; ++i) {
14053 ret = sdma_init(dd, i);
14054 if (ret)
14055 goto bail_cleanup;
14056 }
14057
14058 /* use contexts created by hfi1_create_ctxts */
14059 ret = set_up_interrupts(dd);
14060 if (ret)
14061 goto bail_cleanup;
14062
14063 /* set up LCB access - must be after set_up_interrupts() */
14064 init_lcb_access(dd);
14065
14066 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14067 dd->base_guid & 0xFFFFFF);
14068
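 /* The OUI is taken from the top three bytes of the 64-bit GUID (EUI-64 style layout). */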
14069 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14070 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14071 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14072
14073 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14074 if (ret)
14075 goto bail_clear_intr;
14076 check_fabric_firmware_versions(dd);
14077
14078 thermal_init(dd);
14079
14080 ret = init_cntrs(dd);
14081 if (ret)
14082 goto bail_clear_intr;
14083
14084 ret = init_rcverr(dd);
14085 if (ret)
14086 goto bail_free_cntrs;
14087
14088 ret = eprom_init(dd);
14089 if (ret)
14090 goto bail_free_rcverr;
14091
14092 goto bail;
14093
14094bail_free_rcverr:
14095 free_rcverr(dd);
14096bail_free_cntrs:
14097 free_cntrs(dd);
14098bail_clear_intr:
14099 clean_up_interrupts(dd);
14100bail_cleanup:
14101 hfi1_pcie_ddcleanup(dd);
14102bail_free:
14103 hfi1_free_devdata(dd);
14104 dd = ERR_PTR(ret);
14105bail:
14106 return dd;
14107}
14108
14109static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14110 u32 dw_len)
14111{
14112 u32 delta_cycles;
14113 u32 current_egress_rate = ppd->current_egress_rate;
14114 /* rates here are in units of 10^6 bits/sec */
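 /*
  * Illustrative example (numbers are hypothetical, not from the link
  * spec): if the port currently egresses at 100000 Mbit/s but the
  * requested static rate is 25000 Mbit/s, the return value is the extra
  * cycle count needed so the packet occupies the wire as long as it
  * would at the slower rate; equal or faster requests return 0.
  */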
14115
14116 if (desired_egress_rate == -1)
14117 return 0; /* shouldn't happen */
14118
14119 if (desired_egress_rate >= current_egress_rate)
14120 return 0; /* we can't help go faster, only slower */
14121
14122 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14123 egress_cycles(dw_len * 4, current_egress_rate);
14124
14125 return (u16)delta_cycles;
14126}
14127
14128
14129/**
14130 * create_pbc - build a pbc for transmission
14131 * @flags: special case flags or-ed in built pbc
14132 * @srate: static rate
14133 * @vl: vl
14134 * @dwlen: dword length (header words + data words + pbc words)
14135 *
14136 * Create a PBC with the given flags, rate, VL, and length.
14137 *
14138 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14139 * for verbs, which does not use this PSM feature. The lone other caller
14140 * is for the diagnostic interface which calls this if the user does not
14141 * supply their own PBC.
14142 */
14143u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14144 u32 dw_len)
14145{
14146 u64 pbc, delay = 0;
14147
14148 if (unlikely(srate_mbs))
14149 delay = delay_cycles(ppd, srate_mbs, dw_len);
14150
14151 pbc = flags
14152 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14153 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14154 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14155 | (dw_len & PBC_LENGTH_DWS_MASK)
14156 << PBC_LENGTH_DWS_SHIFT;
14157
14158 return pbc;
14159}
14160
14161#define SBUS_THERMAL 0x4f
14162#define SBUS_THERM_MONITOR_MODE 0x1
14163
14164#define THERM_FAILURE(dev, ret, reason) \
14165 dd_dev_err((dd), \
14166 "Thermal sensor initialization failed: %s (%d)\n", \
14167 (reason), (ret))
14168
14169/*
14170 * Initialize the Avago Thermal sensor.
14171 *
14172 * After initialization, enable polling of thermal sensor through
14173 * SBus interface. In order for this to work, the SBus Master
14174 * firmware has to be loaded due to the fact that the HW polling
14175 * logic uses SBus interrupts, which are not supported with
14176 * default firmware. Otherwise, no data will be returned through
14177 * the ASIC_STS_THERM CSR.
14178 */
14179static int thermal_init(struct hfi1_devdata *dd)
14180{
14181 int ret = 0;
14182
14183 if (dd->icode != ICODE_RTL_SILICON ||
14184 !(dd->flags & HFI1_DO_INIT_ASIC))
14185 return ret;
14186
14187 acquire_hw_mutex(dd);
14188 dd_dev_info(dd, "Initializing thermal sensor\n");
14189 /* Disable polling of thermal readings */
14190 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14191 msleep(100);
14192 /* Thermal Sensor Initialization */
14193 /* Step 1: Reset the Thermal SBus Receiver */
14194 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14195 RESET_SBUS_RECEIVER, 0);
14196 if (ret) {
14197 THERM_FAILURE(dd, ret, "Bus Reset");
14198 goto done;
14199 }
14200 /* Step 2: Set Reset bit in Thermal block */
14201 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14202 WRITE_SBUS_RECEIVER, 0x1);
14203 if (ret) {
14204 THERM_FAILURE(dd, ret, "Therm Block Reset");
14205 goto done;
14206 }
14207 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
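 /* 0x32 is 50 decimal: 100MHz / 50 = 2MHz (assuming a simple integer divider) */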
14208 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14209 WRITE_SBUS_RECEIVER, 0x32);
14210 if (ret) {
14211 THERM_FAILURE(dd, ret, "Write Clock Div");
14212 goto done;
14213 }
14214 /* Step 4: Select temperature mode */
14215 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14216 WRITE_SBUS_RECEIVER,
14217 SBUS_THERM_MONITOR_MODE);
14218 if (ret) {
14219 THERM_FAILURE(dd, ret, "Write Mode Sel");
14220 goto done;
14221 }
14222 /* Step 5: De-assert block reset and start conversion */
14223 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14224 WRITE_SBUS_RECEIVER, 0x2);
14225 if (ret) {
14226 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14227 goto done;
14228 }
14229 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14230 msleep(22);
14231
14232 /* Enable polling of thermal readings */
14233 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14234done:
14235 release_hw_mutex(dd);
14236 return ret;
14237}
14238
14239static void handle_temp_err(struct hfi1_devdata *dd)
14240{
14241 struct hfi1_pportdata *ppd = &dd->pport[0];
14242 /*
14243 * Thermal Critical Interrupt
14244 * Put the device into forced freeze mode, take link down to
14245 * offline, and put DC into reset.
14246 */
14247 dd_dev_emerg(dd,
14248 "Critical temperature reached! Forcing device into freeze mode!\n");
14249 dd->flags |= HFI1_FORCED_FREEZE;
14250 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14251 /*
14252 * Shut DC down as much and as quickly as possible.
14253 *
14254 * Step 1: Take the link down to OFFLINE. This will cause the
14255 * 8051 to put the Serdes in reset. However, we don't want to
14256 * go through the entire link state machine since we want to
14257 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14258 * but rather an attempt to save the chip.
14259 * Code below is almost the same as quiet_serdes() but avoids
14260 * all the extra work and the sleeps.
14261 */
14262 ppd->driver_link_ready = 0;
14263 ppd->link_enabled = 0;
14264 set_physical_link_state(dd, PLS_OFFLINE |
14265 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14266 /*
14267 * Step 2: Shutdown LCB and 8051
14268 * After shutdown, do not restore DC_CFG_RESET value.
14269 */
14270 dc_shutdown(dd);
14271}