 1/*
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2015 Intel Corporation.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2015 Intel Corporation.
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 *
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
32 * distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
36 *
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 */
50
51/*
52 * This file contains all of the code that is specific to the HFI chip
53 */
54
55#include <linux/pci.h>
56#include <linux/delay.h>
57#include <linux/interrupt.h>
58#include <linux/module.h>
59
60#include "hfi.h"
61#include "trace.h"
62#include "mad.h"
63#include "pio.h"
64#include "sdma.h"
65#include "eprom.h"
 66#include "efivar.h"
 67#include "platform.h"
 68
69#define NUM_IB_PORTS 1
70
71uint kdeth_qp;
72module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
73MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
74
75uint num_vls = HFI1_MAX_VLS_SUPPORTED;
76module_param(num_vls, uint, S_IRUGO);
77MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
78
79/*
80 * Default time to aggregate two 10K packets from the idle state
81 * (timer not running). The timer starts at the end of the first packet,
82 * so only the time for one 10K packet and header plus a bit extra is needed.
 83 * 10 * 1024 + 64 header bytes = 10304 bytes
 84 * 10304 bytes / 12.5 GB/s = 824.32 ns
85 */
86uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
87module_param(rcv_intr_timeout, uint, S_IRUGO);
88MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
89
90uint rcv_intr_count = 16; /* same as qib */
91module_param(rcv_intr_count, uint, S_IRUGO);
92MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
93
94ushort link_crc_mask = SUPPORTED_CRCS;
95module_param(link_crc_mask, ushort, S_IRUGO);
96MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
97
98uint loopback;
99module_param_named(loopback, loopback, uint, S_IRUGO);
100MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
101
102/* Other driver tunables */
103uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
104static ushort crc_14b_sideband = 1;
105static uint use_flr = 1;
106uint quick_linkup; /* skip LNI */
107
108struct flag_table {
109 u64 flag; /* the flag */
110 char *str; /* description string */
111 u16 extra; /* extra information */
112 u16 unused0;
113 u32 unused1;
114};
115
116/* str must be a string constant */
117#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
118#define FLAG_ENTRY0(str, flag) {flag, str, 0}
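/*
 * Illustration (not part of the driver tables): with the struct and
 * macros above,
 *
 *	FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED, mask)
 *
 * expands to the initializer { mask, "PioWriteBadCtxt", SEC_WRITE_DROPPED },
 * filling the flag, str, and extra members of struct flag_table in that
 * order; FLAG_ENTRY0() is the same with extra forced to 0.
 */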
119
120/* Send Error Consequences */
121#define SEC_WRITE_DROPPED 0x1
122#define SEC_PACKET_DROPPED 0x2
123#define SEC_SC_HALTED 0x4 /* per-context only */
124#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
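/*
 * Note: the SEC_* values are single bits and may be OR'd together in a
 * flag_table entry's "extra" field; e.g. the PIO error entries below use
 * SEC_WRITE_DROPPED | SEC_SPC_FREEZE when a bad write is both dropped
 * and triggers an SPC freeze.
 */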
125
 126#define MIN_KERNEL_KCTXTS 2
 127#define FIRST_KERNEL_KCTXT 1
 128#define NUM_MAP_REGS 32
129
130/* Bit offset into the GUID which carries HFI id information */
131#define GUID_HFI_INDEX_SHIFT 39
132
133/* extract the emulation revision */
134#define emulator_rev(dd) ((dd)->irev >> 8)
135/* parallel and serial emulation versions are 3 and 4 respectively */
136#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
137#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
138
139/* RSM fields */
140
141/* packet type */
142#define IB_PACKET_TYPE 2ull
143#define QW_SHIFT 6ull
144/* QPN[7..1] */
145#define QPN_WIDTH 7ull
146
147/* LRH.BTH: QW 0, OFFSET 48 - for match */
148#define LRH_BTH_QW 0ull
149#define LRH_BTH_BIT_OFFSET 48ull
150#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
151#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
152#define LRH_BTH_SELECT
153#define LRH_BTH_MASK 3ull
154#define LRH_BTH_VALUE 2ull
155
156/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
157#define LRH_SC_QW 0ull
158#define LRH_SC_BIT_OFFSET 56ull
159#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
160#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
161#define LRH_SC_MASK 128ull
162#define LRH_SC_VALUE 0ull
163
164/* SC[n..0] QW 0, OFFSET 60 - for select */
165#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
166
167/* QPN[m+n:1] QW 1, OFFSET 1 */
168#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
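/*
 * Worked example (illustrative only): with QW_SHIFT = 6, the offsets
 * above encode a quad-word index in the upper bits and a bit offset
 * within that quad word in the lower bits, e.g.
 *
 *	LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *	LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *	QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */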
169
170/* defines to build power on SC2VL table */
171#define SC2VL_VAL( \
172 num, \
173 sc0, sc0val, \
174 sc1, sc1val, \
175 sc2, sc2val, \
176 sc3, sc3val, \
177 sc4, sc4val, \
178 sc5, sc5val, \
179 sc6, sc6val, \
180 sc7, sc7val) \
181( \
182 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
183 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
184 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
185 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
186 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
187 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
188 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
189 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
190)
191
192#define DC_SC_VL_VAL( \
193 range, \
194 e0, e0val, \
195 e1, e1val, \
196 e2, e2val, \
197 e3, e3val, \
198 e4, e4val, \
199 e5, e5val, \
200 e6, e6val, \
201 e7, e7val, \
202 e8, e8val, \
203 e9, e9val, \
204 e10, e10val, \
205 e11, e11val, \
206 e12, e12val, \
207 e13, e13val, \
208 e14, e14val, \
209 e15, e15val) \
210( \
211 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
212 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
213 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
214 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
215 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
216 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
217 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
218 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
219 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
220 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
221 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
222 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
223 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
224 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
225 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
226 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
227)
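/*
 * Illustration (hypothetical arguments): SC2VL_VAL(0, 0, 0, 1, 1, 2, 2,
 * 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) builds a single 64-bit value for the
 * SendSC2VLt0 CSR with SC0..SC7 mapped to VL0..VL7, each VL value
 * shifted into its SC field.  DC_SC_VL_VAL() builds the equivalent
 * value for a 16-entry range of the DCC SC-to-VL table.
 */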
228
229/* all CceStatus sub-block freeze bits */
230#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
231 | CCE_STATUS_RXE_FROZE_SMASK \
232 | CCE_STATUS_TXE_FROZE_SMASK \
233 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
234/* all CceStatus sub-block TXE pause bits */
235#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
236 | CCE_STATUS_TXE_PAUSED_SMASK \
237 | CCE_STATUS_SDMA_PAUSED_SMASK)
238/* all CceStatus sub-block RXE pause bits */
239#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
240
241/*
242 * CCE Error flags.
243 */
244static struct flag_table cce_err_status_flags[] = {
245/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
246 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
247/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
248 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
249/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
250 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
251/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
252 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
253/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
254 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
255/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
256 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
257/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
258 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
259/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
260 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
261/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
262 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
263/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
264 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
265/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
266 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
267/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
268 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
269/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
270 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
271/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
272 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
273/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
274 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
275/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
276 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
277/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
278 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
279/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
280 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
281/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
282 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
283/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
284 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
285/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
286 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
287/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
288 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
289/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
290 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
291/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
292 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
293/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
294 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
295/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
296 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
297/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
298 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
299/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
300 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
301/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
302 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
303/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
304 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
305/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
306 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
307/*31*/ FLAG_ENTRY0("LATriggered",
308 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
309/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
310 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
311/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
312 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
313/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
314 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
315/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
316 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
317/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
318 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
319/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
320 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
321/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
322 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
323/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
324 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
325/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
326 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
327/*41-63 reserved*/
328};
329
330/*
331 * Misc Error flags
332 */
333#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
334static struct flag_table misc_err_status_flags[] = {
335/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
336/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
337/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
338/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
339/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
340/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
341/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
342/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
343/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
344/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
345/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
346/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
347/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
348};
349
350/*
351 * TXE PIO Error flags and consequences
352 */
353static struct flag_table pio_err_status_flags[] = {
354/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
355 SEC_WRITE_DROPPED,
356 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
357/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
358 SEC_SPC_FREEZE,
359 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
360/* 2*/ FLAG_ENTRY("PioCsrParity",
361 SEC_SPC_FREEZE,
362 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
363/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
364 SEC_SPC_FREEZE,
365 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
366/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
367 SEC_SPC_FREEZE,
368 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
369/* 5*/ FLAG_ENTRY("PioPccFifoParity",
370 SEC_SPC_FREEZE,
371 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
372/* 6*/ FLAG_ENTRY("PioPecFifoParity",
373 SEC_SPC_FREEZE,
374 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
375/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
376 SEC_SPC_FREEZE,
377 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
378/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
379 SEC_SPC_FREEZE,
380 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
381/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
382 SEC_SPC_FREEZE,
383 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
384/*10*/ FLAG_ENTRY("PioSmPktResetParity",
385 SEC_SPC_FREEZE,
386 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
387/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
388 SEC_SPC_FREEZE,
389 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
390/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
391 SEC_SPC_FREEZE,
392 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
393/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
394 0,
395 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
396/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
397 0,
398 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
399/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
400 SEC_SPC_FREEZE,
401 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
402/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
403 SEC_SPC_FREEZE,
404 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
405/*17*/ FLAG_ENTRY("PioInitSmIn",
406 0,
407 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
408/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
409 SEC_SPC_FREEZE,
410 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
411/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
412 SEC_SPC_FREEZE,
413 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
414/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
415 0,
416 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
417/*21*/ FLAG_ENTRY("PioWriteDataParity",
418 SEC_SPC_FREEZE,
419 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
420/*22*/ FLAG_ENTRY("PioStateMachine",
421 SEC_SPC_FREEZE,
422 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
423/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
424 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
425 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
426/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
427 SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
428 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
429/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
430 SEC_SPC_FREEZE,
431 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
432/*26*/ FLAG_ENTRY("PioVlfSopParity",
433 SEC_SPC_FREEZE,
434 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
435/*27*/ FLAG_ENTRY("PioVlFifoParity",
436 SEC_SPC_FREEZE,
437 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
438/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
439 SEC_SPC_FREEZE,
440 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
441/*29*/ FLAG_ENTRY("PioPpmcSopLen",
442 SEC_SPC_FREEZE,
443 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
444/*30-31 reserved*/
445/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
446 SEC_SPC_FREEZE,
447 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
448/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
449 SEC_SPC_FREEZE,
450 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
451/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
452 SEC_SPC_FREEZE,
453 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
454/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
455 SEC_SPC_FREEZE,
456 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
457/*36-63 reserved*/
458};
459
460/* TXE PIO errors that cause an SPC freeze */
461#define ALL_PIO_FREEZE_ERR \
462 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
463 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
464 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
465 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
466 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
467 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
491
492/*
493 * TXE SDMA Error flags
494 */
495static struct flag_table sdma_err_status_flags[] = {
496/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
497 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
498/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
499 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
500/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
501 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
502/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
503 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
504/*04-63 reserved*/
505};
506
507/* TXE SDMA errors that cause an SPC freeze */
508#define ALL_SDMA_FREEZE_ERR \
509 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
510 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
511 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
512
513/*
514 * TXE Egress Error flags
515 */
516#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
517static struct flag_table egress_err_status_flags[] = {
518/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
519/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
520/* 2 reserved */
521/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
522 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
523/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
524/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
525/* 6 reserved */
526/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
527 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
528/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
529 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
530/* 9-10 reserved */
531/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
532 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
533/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
534/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
535/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
536/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
537/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
538 SEES(TX_SDMA0_DISALLOWED_PACKET)),
539/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
540 SEES(TX_SDMA1_DISALLOWED_PACKET)),
541/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
542 SEES(TX_SDMA2_DISALLOWED_PACKET)),
543/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
544 SEES(TX_SDMA3_DISALLOWED_PACKET)),
545/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
546 SEES(TX_SDMA4_DISALLOWED_PACKET)),
547/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
548 SEES(TX_SDMA5_DISALLOWED_PACKET)),
549/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
550 SEES(TX_SDMA6_DISALLOWED_PACKET)),
551/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
552 SEES(TX_SDMA7_DISALLOWED_PACKET)),
553/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
554 SEES(TX_SDMA8_DISALLOWED_PACKET)),
555/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
556 SEES(TX_SDMA9_DISALLOWED_PACKET)),
557/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
558 SEES(TX_SDMA10_DISALLOWED_PACKET)),
559/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
560 SEES(TX_SDMA11_DISALLOWED_PACKET)),
561/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
562 SEES(TX_SDMA12_DISALLOWED_PACKET)),
563/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
564 SEES(TX_SDMA13_DISALLOWED_PACKET)),
565/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
566 SEES(TX_SDMA14_DISALLOWED_PACKET)),
567/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
568 SEES(TX_SDMA15_DISALLOWED_PACKET)),
569/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
570 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
571/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
572 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
573/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
574 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
575/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
576 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
577/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
578 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
579/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
580 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
581/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
582 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
583/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
584 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
585/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
586 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
587/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
588/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
589/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
590/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
591/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
592/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
593/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
594/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
595/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
596/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
597/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
598/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
599/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
600/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
601/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
602/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
603/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
604/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
605/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
606/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
607/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
608/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
609 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
610/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
611 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
612};
613
614/*
615 * TXE Egress Error Info flags
616 */
617#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
618static struct flag_table egress_err_info_flags[] = {
619/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
620/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
621/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
622/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
623/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
624/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
625/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
626/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
627/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
628/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
629/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
630/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
631/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
632/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
633/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
634/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
635/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
636/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
637/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
638/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
639/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
640/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
641};
642
643/* TXE Egress errors that cause an SPC freeze */
644#define ALL_TXE_EGRESS_FREEZE_ERR \
645 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
646 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
647 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
648 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
649 | SEES(TX_LAUNCH_CSR_PARITY) \
650 | SEES(TX_SBRD_CTL_CSR_PARITY) \
651 | SEES(TX_CONFIG_PARITY) \
652 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
653 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
654 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
655 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
656 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
657 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
658 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
659 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
660 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
661 | SEES(TX_CREDIT_RETURN_PARITY))
662
663/*
664 * TXE Send error flags
665 */
666#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
667static struct flag_table send_err_status_flags[] = {
 668/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
 669/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
670/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
671};
672
673/*
674 * TXE Send Context Error flags and consequences
675 */
676static struct flag_table sc_err_status_flags[] = {
677/* 0*/ FLAG_ENTRY("InconsistentSop",
678 SEC_PACKET_DROPPED | SEC_SC_HALTED,
679 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
680/* 1*/ FLAG_ENTRY("DisallowedPacket",
681 SEC_PACKET_DROPPED | SEC_SC_HALTED,
682 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
683/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
684 SEC_WRITE_DROPPED | SEC_SC_HALTED,
685 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
686/* 3*/ FLAG_ENTRY("WriteOverflow",
687 SEC_WRITE_DROPPED | SEC_SC_HALTED,
688 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
689/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
690 SEC_WRITE_DROPPED | SEC_SC_HALTED,
691 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
692/* 5-63 reserved*/
693};
694
695/*
696 * RXE Receive Error flags
697 */
698#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
699static struct flag_table rxe_err_status_flags[] = {
700/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
701/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
702/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
703/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
704/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
705/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
706/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
707/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
708/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
709/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
710/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
711/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
712/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
713/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
714/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
715/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
716/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
717 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
718/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
719/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
720/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
721 RXES(RBUF_BLOCK_LIST_READ_UNC)),
722/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
723 RXES(RBUF_BLOCK_LIST_READ_COR)),
724/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
725 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
726/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
727 RXES(RBUF_CSR_QENT_CNT_PARITY)),
728/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
729 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
730/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
731 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
732/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
733/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
734/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
735 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
736/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
737/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
738/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
739/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
740/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
741/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
742/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
743/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
744 RXES(RBUF_FL_INITDONE_PARITY)),
745/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
746 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
747/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
748/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
749/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
750/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
751 RXES(LOOKUP_DES_PART1_UNC_COR)),
752/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
753 RXES(LOOKUP_DES_PART2_PARITY)),
754/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
755/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
756/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
757/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
758/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
759/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
760/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
761/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
762/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
763/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
764/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
765/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
766/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
767/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
768/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
769/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
770/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
771/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
772/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
773/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
774/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
775/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
776};
777
778/* RXE errors that will trigger an SPC freeze */
779#define ALL_RXE_FREEZE_ERR \
780 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
781 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
782 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
783 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
784 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
785 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
786 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
787 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
788 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
789 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
790 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
791 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
824
825#define RXE_FREEZE_ABORT_MASK \
826 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
827 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
828 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
829
830/*
831 * DCC Error Flags
832 */
833#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
834static struct flag_table dcc_err_flags[] = {
835 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
836 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
837 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
838 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
839 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
840 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
841 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
842 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
843 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
844 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
845 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
846 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
847 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
848 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
849 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
850 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
851 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
852 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
853 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
854 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
855 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
856 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
857 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
858 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
859 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
860 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
861 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
862 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
863 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
864 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
865 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
866 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
867 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
868 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
869 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
870 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
871 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
872 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
873 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
874 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
875 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
876 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
877 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
878 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
879 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
880 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
881};
882
883/*
884 * LCB error flags
885 */
886#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
887static struct flag_table lcb_err_flags[] = {
888/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
889/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
890/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
891/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
892 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
893/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
894/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
895/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
896/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
897/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
898/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
899/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
900/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
901/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
902/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
903 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
904/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
905/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
906/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
907/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
908/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
909/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
910 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
911/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
912/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
913/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
914/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
915/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
916/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
917/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
918 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
919/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
920/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
921 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
922/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
923 LCBE(REDUNDANT_FLIT_PARITY_ERR))
924};
925
926/*
927 * DC8051 Error Flags
928 */
929#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
930static struct flag_table dc8051_err_flags[] = {
931 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
932 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
933 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
934 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
935 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
936 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
937 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
938 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
939 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
940 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
941 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
942};
943
944/*
945 * DC8051 Information Error flags
946 *
947 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
948 */
949static struct flag_table dc8051_info_err_flags[] = {
950 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
951 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
952 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
953 FLAG_ENTRY0("Serdes internal loopback failure",
954 FAILED_SERDES_INTERNAL_LOOPBACK),
955 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
956 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
957 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
958 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
959 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
960 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
961 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
962 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
963};
964
965/*
966 * DC8051 Information Host Information flags
967 *
968 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
969 */
970static struct flag_table dc8051_info_host_msg_flags[] = {
971 FLAG_ENTRY0("Host request done", 0x0001),
972 FLAG_ENTRY0("BC SMA message", 0x0002),
973 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
974 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
975 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
976 FLAG_ENTRY0("External device config request", 0x0020),
977 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
978 FLAG_ENTRY0("LinkUp achieved", 0x0080),
979 FLAG_ENTRY0("Link going down", 0x0100),
980};
981
982
983static u32 encoded_size(u32 size);
984static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
985static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
986static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
987 u8 *continuous);
988static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
989 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
990static void read_vc_remote_link_width(struct hfi1_devdata *dd,
991 u8 *remote_tx_rate, u16 *link_widths);
992static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
993 u8 *flag_bits, u16 *link_widths);
994static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
995 u8 *device_rev);
996static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
997static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
998static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
999 u8 *tx_polarity_inversion,
1000 u8 *rx_polarity_inversion, u8 *max_rate);
1001static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1002 unsigned int context, u64 err_status);
1003static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1004static void handle_dcc_err(struct hfi1_devdata *dd,
1005 unsigned int context, u64 err_status);
1006static void handle_lcb_err(struct hfi1_devdata *dd,
1007 unsigned int context, u64 err_status);
1008static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1009static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1010static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1011static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1012static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016static void set_partition_keys(struct hfi1_pportdata *);
1017static const char *link_state_name(u32 state);
1018static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1019 u32 state);
1020static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1021 u64 *out_data);
1022static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1023static int thermal_init(struct hfi1_devdata *dd);
1024
1025static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1026 int msecs);
1027static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1028static void handle_temp_err(struct hfi1_devdata *);
1029static void dc_shutdown(struct hfi1_devdata *);
1030static void dc_start(struct hfi1_devdata *);
1031
1032/*
1033 * Error interrupt table entry. This is used as input to the interrupt
1034 * "clear down" routine used for all second tier error interrupt register.
1035 * Second tier interrupt registers have a single bit representing them
1036 * in the top-level CceIntStatus.
1037 */
1038struct err_reg_info {
1039 u32 status; /* status CSR offset */
1040 u32 clear; /* clear CSR offset */
1041 u32 mask; /* mask CSR offset */
1042 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1043 const char *desc;
1044};
1045
1046#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1047#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1048#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1049
1050/*
1051 * Helpers for building HFI and DC error interrupt table entries. Different
1052 * helpers are needed because of inconsistent register names.
1053 */
1054#define EE(reg, handler, desc) \
1055 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1056 handler, desc }
1057#define DC_EE1(reg, handler, desc) \
1058 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1059#define DC_EE2(reg, handler, desc) \
1060 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
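/*
 * Example expansion (matches the usage in the tables below):
 *
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 *
 * becomes { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err,
 * "CceErr" }, i.e. the status/clear/mask CSR offsets plus the second-tier
 * handler and a description string.
 */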
1061
1062/*
1063 * Table of the "misc" grouping of error interrupts. Each entry refers to
1064 * another register containing more information.
1065 */
1066static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1067/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1068/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1069/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1070/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1071/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1072/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1073/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1074/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1075 /* the rest are reserved */
1076};
1077
1078/*
1079 * Index into the Various section of the interrupt sources
1080 * corresponding to the Critical Temperature interrupt.
1081 */
1082#define TCRIT_INT_SOURCE 4
1083
1084/*
1085 * SDMA error interrupt entry - refers to another register containing more
1086 * information.
1087 */
1088static const struct err_reg_info sdma_eng_err =
1089 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1090
1091static const struct err_reg_info various_err[NUM_VARIOUS] = {
1092/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1093/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1094/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1095/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1096/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1097 /* rest are reserved */
1098};
1099
1100/*
1101 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1102 * register can not be derived from the MTU value because 10K is not
1103 * a power of 2. Therefore, we need a constant. Everything else can
1104 * be calculated.
1105 */
1106#define DCC_CFG_PORT_MTU_CAP_10240 7
1107
1108/*
1109 * Table of the DC grouping of error interrupts. Each entry refers to
1110 * another register containing more information.
1111 */
1112static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1113/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1114/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1115/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1116/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1117 /* the rest are reserved */
1118};
1119
1120struct cntr_entry {
1121 /*
1122 * counter name
1123 */
1124 char *name;
1125
1126 /*
1127 * csr to read for name (if applicable)
1128 */
1129 u64 csr;
1130
1131 /*
1132 * offset into dd or ppd to store the counter's value
1133 */
1134 int offset;
1135
1136 /*
1137 * flags
1138 */
1139 u8 flags;
1140
1141 /*
1142 * accessor for stat element, context either dd or ppd
1143 */
1144 u64 (*rw_cntr)(const struct cntr_entry *,
1145 void *context,
1146 int vl,
1147 int mode,
1148 u64 data);
1149};
1150
1151#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1152#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1153
1154#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1155{ \
1156 name, \
1157 csr, \
1158 offset, \
1159 flags, \
1160 accessor \
1161}
1162
1163/* 32bit RXE */
1164#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1165CNTR_ELEM(#name, \
1166 (counter * 8 + RCV_COUNTER_ARRAY32), \
1167 0, flags | CNTR_32BIT, \
1168 port_access_u32_csr)
1169
1170#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1171CNTR_ELEM(#name, \
1172 (counter * 8 + RCV_COUNTER_ARRAY32), \
1173 0, flags | CNTR_32BIT, \
1174 dev_access_u32_csr)
1175
1176/* 64bit RXE */
1177#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1178CNTR_ELEM(#name, \
1179 (counter * 8 + RCV_COUNTER_ARRAY64), \
1180 0, flags, \
1181 port_access_u64_csr)
1182
1183#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1184CNTR_ELEM(#name, \
1185 (counter * 8 + RCV_COUNTER_ARRAY64), \
1186 0, flags, \
1187 dev_access_u64_csr)
1188
1189#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1190#define OVR_ELM(ctx) \
1191CNTR_ELEM("RcvHdrOvr" #ctx, \
1192 (RCV_HDR_OVFL_CNT + ctx*0x100), \
1193 0, CNTR_NORMAL, port_access_u64_csr)
1194
1195/* 32bit TXE */
1196#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1197CNTR_ELEM(#name, \
1198 (counter * 8 + SEND_COUNTER_ARRAY32), \
1199 0, flags | CNTR_32BIT, \
1200 port_access_u32_csr)
1201
1202/* 64bit TXE */
1203#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1204CNTR_ELEM(#name, \
1205 (counter * 8 + SEND_COUNTER_ARRAY64), \
1206 0, flags, \
1207 port_access_u64_csr)
1208
 1209#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1210CNTR_ELEM(#name,\
1211 counter * 8 + SEND_COUNTER_ARRAY64, \
1212 0, \
1213 flags, \
1214 dev_access_u64_csr)
1215
1216/* CCE */
1217#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1218CNTR_ELEM(#name, \
1219 (counter * 8 + CCE_COUNTER_ARRAY32), \
1220 0, flags | CNTR_32BIT, \
1221 dev_access_u32_csr)
1222
1223#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name, \
1225 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1226 0, flags | CNTR_32BIT, \
1227 dev_access_u32_csr)
1228
1229/* DC */
1230#define DC_PERF_CNTR(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 counter, \
1233 0, \
1234 flags, \
1235 dev_access_u64_csr)
1236
1237#define DC_PERF_CNTR_LCB(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 counter, \
1240 0, \
1241 flags, \
1242 dc_access_lcb_cntr)
1243
1244/* ibp counters */
1245#define SW_IBP_CNTR(name, cntr) \
1246CNTR_ELEM(#name, \
1247 0, \
1248 0, \
1249 CNTR_SYNTH, \
1250 access_ibp_##cntr)
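/*
 * Illustration (hypothetical counter name and index): an entry such as
 *
 *	RXE32_DEV_CNTR_ELEM(RxExample, 5, CNTR_NORMAL)
 *
 * would name the counter "RxExample", read the 32-bit CSR at
 * RCV_COUNTER_ARRAY32 + 5 * 8 through dev_access_u32_csr(), and set
 * CNTR_32BIT in its flags to mark it as a 32-bit counter.  The real
 * tables built from these macros appear later in this file.
 */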
1251
1252u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1253{
1254 u64 val;
1255
1256 if (dd->flags & HFI1_PRESENT) {
1257 val = readq((void __iomem *)dd->kregbase + offset);
1258 return val;
1259 }
1260 return -1;
1261}
1262
1263void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1264{
1265 if (dd->flags & HFI1_PRESENT)
1266 writeq(value, (void __iomem *)dd->kregbase + offset);
1267}
1268
1269void __iomem *get_csr_addr(
1270 struct hfi1_devdata *dd,
1271 u32 offset)
1272{
1273 return (void __iomem *)dd->kregbase + offset;
1274}
1275
1276static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1277 int mode, u64 value)
1278{
1279 u64 ret;
1280
1281
1282 if (mode == CNTR_MODE_R) {
1283 ret = read_csr(dd, csr);
1284 } else if (mode == CNTR_MODE_W) {
1285 write_csr(dd, csr, value);
1286 ret = value;
1287 } else {
1288 dd_dev_err(dd, "Invalid cntr register access mode");
1289 return 0;
1290 }
1291
1292 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1293 return ret;
1294}
1295
1296/* Dev Access */
1297static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1298 void *context, int vl, int mode, u64 data)
1299{
 1300 struct hfi1_devdata *dd = context;
 1301 u64 csr = entry->csr;
 1302
 1303 if (entry->flags & CNTR_SDMA) {
1304 if (vl == CNTR_INVALID_VL)
1305 return 0;
1306 csr += 0x100 * vl;
1307 } else {
1308 if (vl != CNTR_INVALID_VL)
1309 return 0;
1310 }
1311 return read_write_csr(dd, csr, mode, data);
1312}
1313
1314static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1315 void *context, int idx, int mode, u64 data)
1316{
1317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1318
1319 if (dd->per_sdma && idx < dd->num_sdma)
1320 return dd->per_sdma[idx].err_cnt;
1321 return 0;
1322}
1323
1324static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1325 void *context, int idx, int mode, u64 data)
1326{
1327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1328
1329 if (dd->per_sdma && idx < dd->num_sdma)
1330 return dd->per_sdma[idx].sdma_int_cnt;
1331 return 0;
1332}
1333
1334static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1335 void *context, int idx, int mode, u64 data)
1336{
1337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1338
1339 if (dd->per_sdma && idx < dd->num_sdma)
1340 return dd->per_sdma[idx].idle_int_cnt;
1341 return 0;
1342}
1343
1344static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1345 void *context, int idx, int mode,
1346 u64 data)
1347{
1348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1349
1350 if (dd->per_sdma && idx < dd->num_sdma)
1351 return dd->per_sdma[idx].progress_int_cnt;
1352 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001353}
1354
1355static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1356 int vl, int mode, u64 data)
1357{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301358 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001359
1360 u64 val = 0;
1361 u64 csr = entry->csr;
1362
1363 if (entry->flags & CNTR_VL) {
1364 if (vl == CNTR_INVALID_VL)
1365 return 0;
1366 csr += 8 * vl;
1367 } else {
1368 if (vl != CNTR_INVALID_VL)
1369 return 0;
1370 }
1371
1372 val = read_write_csr(dd, csr, mode, data);
1373 return val;
1374}
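/*
 * Editor's note: the per-VL variant strides by 8 bytes instead - a CNTR_VL
 * counter for VL n is the n-th consecutive 64-bit CSR after the base
 * address.
 */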
1375
1376static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1377 int vl, int mode, u64 data)
1378{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301379 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001380 u32 csr = entry->csr;
1381 int ret = 0;
1382
1383 if (vl != CNTR_INVALID_VL)
1384 return 0;
1385 if (mode == CNTR_MODE_R)
1386 ret = read_lcb_csr(dd, csr, &data);
1387 else if (mode == CNTR_MODE_W)
1388 ret = write_lcb_csr(dd, csr, data);
1389
1390 if (ret) {
1391 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1392 return 0;
1393 }
1394
1395 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1396 return data;
1397}
1398
1399/* Port Access */
1400static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1401 int vl, int mode, u64 data)
1402{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301403 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001404
1405 if (vl != CNTR_INVALID_VL)
1406 return 0;
1407 return read_write_csr(ppd->dd, entry->csr, mode, data);
1408}
1409
1410static u64 port_access_u64_csr(const struct cntr_entry *entry,
1411 void *context, int vl, int mode, u64 data)
1412{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301413 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001414 u64 val;
1415 u64 csr = entry->csr;
1416
1417 if (entry->flags & CNTR_VL) {
1418 if (vl == CNTR_INVALID_VL)
1419 return 0;
1420 csr += 8 * vl;
1421 } else {
1422 if (vl != CNTR_INVALID_VL)
1423 return 0;
1424 }
1425 val = read_write_csr(ppd->dd, csr, mode, data);
1426 return val;
1427}
1428
1429/* Software defined */
1430static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1431 u64 data)
1432{
1433 u64 ret;
1434
1435 if (mode == CNTR_MODE_R) {
1436 ret = *cntr;
1437 } else if (mode == CNTR_MODE_W) {
1438 *cntr = data;
1439 ret = data;
1440 } else {
1441 dd_dev_err(dd, "Invalid cntr sw access mode");
1442 return 0;
1443 }
1444
1445 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1446
1447 return ret;
1448}
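/*
 * Editor's note: unlike the per-CPU counters further below, these plain
 * software counters live in a single u64 (typically a hfi1_pportdata
 * field), so a write stores the requested value directly rather than only
 * zeroing.
 */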
1449
1450static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1451 int vl, int mode, u64 data)
1452{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301453 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001454
1455 if (vl != CNTR_INVALID_VL)
1456 return 0;
1457 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1458}
1459
1460static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1461 int vl, int mode, u64 data)
1462{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301463 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001464
1465 if (vl != CNTR_INVALID_VL)
1466 return 0;
1467 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1468}
1469
Dean Luick6d014532015-12-01 15:38:23 -05001470static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1471 void *context, int vl, int mode,
1472 u64 data)
1473{
1474 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1475
1476 if (vl != CNTR_INVALID_VL)
1477 return 0;
1478 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1479}
1480
Mike Marciniszyn77241052015-07-30 15:17:43 -04001481static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1482 void *context, int vl, int mode, u64 data)
1483{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301484 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001485
1486 if (vl != CNTR_INVALID_VL)
1487 return 0;
1488
1489 return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1490}
1491
1492static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1493 void *context, int vl, int mode, u64 data)
1494{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301495 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001496
1497 if (vl != CNTR_INVALID_VL)
1498 return 0;
1499
1500 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1501 mode, data);
1502}
1503
1504static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1505 void *context, int vl, int mode, u64 data)
1506{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301507 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001508
1509 if (vl != CNTR_INVALID_VL)
1510 return 0;
1511
1512 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1513 mode, data);
1514}
1515
1516u64 get_all_cpu_total(u64 __percpu *cntr)
1517{
1518 int cpu;
1519 u64 counter = 0;
1520
1521 for_each_possible_cpu(cpu)
1522 counter += *per_cpu_ptr(cntr, cpu);
1523 return counter;
1524}
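/*
 * Editor's note: the total walks every possible CPU, not just the online
 * ones, so counts recorded on a CPU that later went offline are still
 * included.
 */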
1525
1526static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1527 u64 __percpu *cntr,
1528 int vl, int mode, u64 data)
1529{
1530
1531 u64 ret = 0;
1532
1533 if (vl != CNTR_INVALID_VL)
1534 return 0;
1535
1536 if (mode == CNTR_MODE_R) {
1537 ret = get_all_cpu_total(cntr) - *z_val;
1538 } else if (mode == CNTR_MODE_W) {
1539 /* A write can only zero the counter */
1540 if (data == 0)
1541 *z_val = get_all_cpu_total(cntr);
1542 else
1543 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1544 } else {
1545 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1546 return 0;
1547 }
1548
1549 return ret;
1550}
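/*
 * Editor's worked example of the zeroing scheme above: if the per-CPU
 * parts currently sum to 1000 and the counter is "zeroed" (written with 0),
 * z_val is set to 1000; a later read taken when the parts sum to 1234
 * reports 1234 - 1000 = 234.  The per-CPU variables themselves are never
 * reset.
 */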
1551
1552static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1553 void *context, int vl, int mode, u64 data)
1554{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301555 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001556
1557 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1558 mode, data);
1559}
1560
1561static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1562 void *context, int vl, int mode, u64 data)
1563{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301564 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001565
1566 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1567 mode, data);
1568}
1569
1570static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1571 void *context, int vl, int mode, u64 data)
1572{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301573 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001574
1575 return dd->verbs_dev.n_piowait;
1576}
1577
1578static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1579 void *context, int vl, int mode, u64 data)
1580{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301581 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001582
1583 return dd->verbs_dev.n_txwait;
1584}
1585
1586static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301589 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001590
1591 return dd->verbs_dev.n_kmem_wait;
1592}
1593
Dean Luickb4219222015-10-26 10:28:35 -04001594static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
1597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1598
1599 return dd->verbs_dev.n_send_schedule;
1600}
1601
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001602/* Software counters for the error status bits within MISC_ERR_STATUS */
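/*
 * Editor's sketch of the pattern used here and for the CceErrStatus,
 * RcvErrStatus, SendPioErrStatus, SendDmaErrStatus and SendEgressErrStatus
 * groups below: slot N of the backing array corresponds to bit N of the
 * hardware error-status register, and the accessors merely report those
 * slots.  A handler feeding such an array could look roughly like the
 * following (a sketch, not the driver's actual error handler):
 *
 *	u64 status = read_csr(dd, MISC_ERR_STATUS);
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(dd->misc_err_status_cnt); i++)
 *		if (status & BIT_ULL(i))
 *			dd->misc_err_status_cnt[i]++;
 */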
1603static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1604 void *context, int vl, int mode,
1605 u64 data)
1606{
1607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1608
1609 return dd->misc_err_status_cnt[12];
1610}
1611
1612static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1613 void *context, int vl, int mode,
1614 u64 data)
1615{
1616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1617
1618 return dd->misc_err_status_cnt[11];
1619}
1620
1621static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1622 void *context, int vl, int mode,
1623 u64 data)
1624{
1625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1626
1627 return dd->misc_err_status_cnt[10];
1628}
1629
1630static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1631 void *context, int vl,
1632 int mode, u64 data)
1633{
1634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635
1636 return dd->misc_err_status_cnt[9];
1637}
1638
1639static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1640 void *context, int vl, int mode,
1641 u64 data)
1642{
1643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644
1645 return dd->misc_err_status_cnt[8];
1646}
1647
1648static u64 access_misc_efuse_read_bad_addr_err_cnt(
1649 const struct cntr_entry *entry,
1650 void *context, int vl, int mode, u64 data)
1651{
1652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653
1654 return dd->misc_err_status_cnt[7];
1655}
1656
1657static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1658 void *context, int vl,
1659 int mode, u64 data)
1660{
1661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662
1663 return dd->misc_err_status_cnt[6];
1664}
1665
1666static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1667 void *context, int vl, int mode,
1668 u64 data)
1669{
1670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671
1672 return dd->misc_err_status_cnt[5];
1673}
1674
1675static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1676 void *context, int vl, int mode,
1677 u64 data)
1678{
1679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681 return dd->misc_err_status_cnt[4];
1682}
1683
1684static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1685 void *context, int vl,
1686 int mode, u64 data)
1687{
1688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689
1690 return dd->misc_err_status_cnt[3];
1691}
1692
1693static u64 access_misc_csr_write_bad_addr_err_cnt(
1694 const struct cntr_entry *entry,
1695 void *context, int vl, int mode, u64 data)
1696{
1697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698
1699 return dd->misc_err_status_cnt[2];
1700}
1701
1702static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1703 void *context, int vl,
1704 int mode, u64 data)
1705{
1706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708 return dd->misc_err_status_cnt[1];
1709}
1710
1711static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1712 void *context, int vl, int mode,
1713 u64 data)
1714{
1715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717 return dd->misc_err_status_cnt[0];
1718}
1719
1720/*
1721 * Software counter for the aggregate of
1722 * individual CceErrStatus counters
1723 */
1724static u64 access_sw_cce_err_status_aggregated_cnt(
1725 const struct cntr_entry *entry,
1726 void *context, int vl, int mode, u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->sw_cce_err_status_aggregate;
1731}
1732
1733/*
1734 * Software counters corresponding to each of the
1735 * error status bits within CceErrStatus
1736 */
1737static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1738 void *context, int vl, int mode,
1739 u64 data)
1740{
1741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1742
1743 return dd->cce_err_status_cnt[40];
1744}
1745
1746static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1747 void *context, int vl, int mode,
1748 u64 data)
1749{
1750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1751
1752 return dd->cce_err_status_cnt[39];
1753}
1754
1755static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1756 void *context, int vl, int mode,
1757 u64 data)
1758{
1759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1760
1761 return dd->cce_err_status_cnt[38];
1762}
1763
1764static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1765 void *context, int vl, int mode,
1766 u64 data)
1767{
1768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769
1770 return dd->cce_err_status_cnt[37];
1771}
1772
1773static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1774 void *context, int vl, int mode,
1775 u64 data)
1776{
1777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778
1779 return dd->cce_err_status_cnt[36];
1780}
1781
1782static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1783 const struct cntr_entry *entry,
1784 void *context, int vl, int mode, u64 data)
1785{
1786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788 return dd->cce_err_status_cnt[35];
1789}
1790
1791static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1792 const struct cntr_entry *entry,
1793 void *context, int vl, int mode, u64 data)
1794{
1795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796
1797 return dd->cce_err_status_cnt[34];
1798}
1799
1800static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1801 void *context, int vl,
1802 int mode, u64 data)
1803{
1804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805
1806 return dd->cce_err_status_cnt[33];
1807}
1808
1809static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1810 void *context, int vl, int mode,
1811 u64 data)
1812{
1813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814
1815 return dd->cce_err_status_cnt[32];
1816}
1817
1818static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1819 void *context, int vl, int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[31];
1824}
1825
1826static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl, int mode,
1828 u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[30];
1833}
1834
1835static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode,
1837 u64 data)
1838{
1839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841 return dd->cce_err_status_cnt[29];
1842}
1843
1844static u64 access_pcic_transmit_back_parity_err_cnt(
1845 const struct cntr_entry *entry,
1846 void *context, int vl, int mode, u64 data)
1847{
1848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849
1850 return dd->cce_err_status_cnt[28];
1851}
1852
1853static u64 access_pcic_transmit_front_parity_err_cnt(
1854 const struct cntr_entry *entry,
1855 void *context, int vl, int mode, u64 data)
1856{
1857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858
1859 return dd->cce_err_status_cnt[27];
1860}
1861
1862static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1863 void *context, int vl, int mode,
1864 u64 data)
1865{
1866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867
1868 return dd->cce_err_status_cnt[26];
1869}
1870
1871static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1872 void *context, int vl, int mode,
1873 u64 data)
1874{
1875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876
1877 return dd->cce_err_status_cnt[25];
1878}
1879
1880static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1881 void *context, int vl, int mode,
1882 u64 data)
1883{
1884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885
1886 return dd->cce_err_status_cnt[24];
1887}
1888
1889static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1890 void *context, int vl, int mode,
1891 u64 data)
1892{
1893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894
1895 return dd->cce_err_status_cnt[23];
1896}
1897
1898static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1899 void *context, int vl,
1900 int mode, u64 data)
1901{
1902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903
1904 return dd->cce_err_status_cnt[22];
1905}
1906
1907static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1908 void *context, int vl, int mode,
1909 u64 data)
1910{
1911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912
1913 return dd->cce_err_status_cnt[21];
1914}
1915
1916static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1917 const struct cntr_entry *entry,
1918 void *context, int vl, int mode, u64 data)
1919{
1920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921
1922 return dd->cce_err_status_cnt[20];
1923}
1924
1925static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1926 void *context, int vl,
1927 int mode, u64 data)
1928{
1929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930
1931 return dd->cce_err_status_cnt[19];
1932}
1933
1934static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1935 void *context, int vl, int mode,
1936 u64 data)
1937{
1938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939
1940 return dd->cce_err_status_cnt[18];
1941}
1942
1943static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1944 void *context, int vl, int mode,
1945 u64 data)
1946{
1947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948
1949 return dd->cce_err_status_cnt[17];
1950}
1951
1952static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1953 void *context, int vl, int mode,
1954 u64 data)
1955{
1956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957
1958 return dd->cce_err_status_cnt[16];
1959}
1960
1961static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1962 void *context, int vl, int mode,
1963 u64 data)
1964{
1965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966
1967 return dd->cce_err_status_cnt[15];
1968}
1969
1970static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1971 void *context, int vl,
1972 int mode, u64 data)
1973{
1974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975
1976 return dd->cce_err_status_cnt[14];
1977}
1978
1979static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1980 void *context, int vl, int mode,
1981 u64 data)
1982{
1983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984
1985 return dd->cce_err_status_cnt[13];
1986}
1987
1988static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1989 const struct cntr_entry *entry,
1990 void *context, int vl, int mode, u64 data)
1991{
1992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993
1994 return dd->cce_err_status_cnt[12];
1995}
1996
1997static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1998 const struct cntr_entry *entry,
1999 void *context, int vl, int mode, u64 data)
2000{
2001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002
2003 return dd->cce_err_status_cnt[11];
2004}
2005
2006static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2007 const struct cntr_entry *entry,
2008 void *context, int vl, int mode, u64 data)
2009{
2010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011
2012 return dd->cce_err_status_cnt[10];
2013}
2014
2015static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2016 const struct cntr_entry *entry,
2017 void *context, int vl, int mode, u64 data)
2018{
2019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020
2021 return dd->cce_err_status_cnt[9];
2022}
2023
2024static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2025 const struct cntr_entry *entry,
2026 void *context, int vl, int mode, u64 data)
2027{
2028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029
2030 return dd->cce_err_status_cnt[8];
2031}
2032
2033static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2034 void *context, int vl,
2035 int mode, u64 data)
2036{
2037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038
2039 return dd->cce_err_status_cnt[7];
2040}
2041
2042static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2043 const struct cntr_entry *entry,
2044 void *context, int vl, int mode, u64 data)
2045{
2046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047
2048 return dd->cce_err_status_cnt[6];
2049}
2050
2051static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2052 void *context, int vl, int mode,
2053 u64 data)
2054{
2055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056
2057 return dd->cce_err_status_cnt[5];
2058}
2059
2060static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2061 void *context, int vl, int mode,
2062 u64 data)
2063{
2064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066 return dd->cce_err_status_cnt[4];
2067}
2068
2069static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2070 const struct cntr_entry *entry,
2071 void *context, int vl, int mode, u64 data)
2072{
2073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075 return dd->cce_err_status_cnt[3];
2076}
2077
2078static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2079 void *context, int vl,
2080 int mode, u64 data)
2081{
2082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084 return dd->cce_err_status_cnt[2];
2085}
2086
2087static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2088 void *context, int vl,
2089 int mode, u64 data)
2090{
2091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093 return dd->cce_err_status_cnt[1];
2094}
2095
2096static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2097 void *context, int vl, int mode,
2098 u64 data)
2099{
2100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102 return dd->cce_err_status_cnt[0];
2103}
2104
2105/*
2106 * Software counters corresponding to each of the
2107 * error status bits within RcvErrStatus
2108 */
2109static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl, int mode,
2111 u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->rcv_err_status_cnt[63];
2116}
2117
2118static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2119 void *context, int vl,
2120 int mode, u64 data)
2121{
2122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124 return dd->rcv_err_status_cnt[62];
2125}
2126
2127static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2128 void *context, int vl, int mode,
2129 u64 data)
2130{
2131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133 return dd->rcv_err_status_cnt[61];
2134}
2135
2136static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2137 void *context, int vl, int mode,
2138 u64 data)
2139{
2140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142 return dd->rcv_err_status_cnt[60];
2143}
2144
2145static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2146 void *context, int vl,
2147 int mode, u64 data)
2148{
2149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151 return dd->rcv_err_status_cnt[59];
2152}
2153
2154static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2155 void *context, int vl,
2156 int mode, u64 data)
2157{
2158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160 return dd->rcv_err_status_cnt[58];
2161}
2162
2163static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2164 void *context, int vl, int mode,
2165 u64 data)
2166{
2167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169 return dd->rcv_err_status_cnt[57];
2170}
2171
2172static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2173 void *context, int vl, int mode,
2174 u64 data)
2175{
2176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178 return dd->rcv_err_status_cnt[56];
2179}
2180
2181static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2182 void *context, int vl, int mode,
2183 u64 data)
2184{
2185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187 return dd->rcv_err_status_cnt[55];
2188}
2189
2190static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2191 const struct cntr_entry *entry,
2192 void *context, int vl, int mode, u64 data)
2193{
2194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196 return dd->rcv_err_status_cnt[54];
2197}
2198
2199static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2200 const struct cntr_entry *entry,
2201 void *context, int vl, int mode, u64 data)
2202{
2203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205 return dd->rcv_err_status_cnt[53];
2206}
2207
2208static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2209 void *context, int vl,
2210 int mode, u64 data)
2211{
2212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214 return dd->rcv_err_status_cnt[52];
2215}
2216
2217static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2218 void *context, int vl,
2219 int mode, u64 data)
2220{
2221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223 return dd->rcv_err_status_cnt[51];
2224}
2225
2226static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2227 void *context, int vl,
2228 int mode, u64 data)
2229{
2230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231
2232 return dd->rcv_err_status_cnt[50];
2233}
2234
2235static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2236 void *context, int vl,
2237 int mode, u64 data)
2238{
2239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240
2241 return dd->rcv_err_status_cnt[49];
2242}
2243
2244static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2245 void *context, int vl,
2246 int mode, u64 data)
2247{
2248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249
2250 return dd->rcv_err_status_cnt[48];
2251}
2252
2253static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2254 void *context, int vl,
2255 int mode, u64 data)
2256{
2257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258
2259 return dd->rcv_err_status_cnt[47];
2260}
2261
2262static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2263 void *context, int vl, int mode,
2264 u64 data)
2265{
2266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267
2268 return dd->rcv_err_status_cnt[46];
2269}
2270
2271static u64 access_rx_hq_intr_csr_parity_err_cnt(
2272 const struct cntr_entry *entry,
2273 void *context, int vl, int mode, u64 data)
2274{
2275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276
2277 return dd->rcv_err_status_cnt[45];
2278}
2279
2280static u64 access_rx_lookup_csr_parity_err_cnt(
2281 const struct cntr_entry *entry,
2282 void *context, int vl, int mode, u64 data)
2283{
2284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285
2286 return dd->rcv_err_status_cnt[44];
2287}
2288
2289static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2290 const struct cntr_entry *entry,
2291 void *context, int vl, int mode, u64 data)
2292{
2293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294
2295 return dd->rcv_err_status_cnt[43];
2296}
2297
2298static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2299 const struct cntr_entry *entry,
2300 void *context, int vl, int mode, u64 data)
2301{
2302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303
2304 return dd->rcv_err_status_cnt[42];
2305}
2306
2307static u64 access_rx_lookup_des_part2_parity_err_cnt(
2308 const struct cntr_entry *entry,
2309 void *context, int vl, int mode, u64 data)
2310{
2311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312
2313 return dd->rcv_err_status_cnt[41];
2314}
2315
2316static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2317 const struct cntr_entry *entry,
2318 void *context, int vl, int mode, u64 data)
2319{
2320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321
2322 return dd->rcv_err_status_cnt[40];
2323}
2324
2325static u64 access_rx_lookup_des_part1_unc_err_cnt(
2326 const struct cntr_entry *entry,
2327 void *context, int vl, int mode, u64 data)
2328{
2329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330
2331 return dd->rcv_err_status_cnt[39];
2332}
2333
2334static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2335 const struct cntr_entry *entry,
2336 void *context, int vl, int mode, u64 data)
2337{
2338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339
2340 return dd->rcv_err_status_cnt[38];
2341}
2342
2343static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2344 const struct cntr_entry *entry,
2345 void *context, int vl, int mode, u64 data)
2346{
2347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348
2349 return dd->rcv_err_status_cnt[37];
2350}
2351
2352static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2353 const struct cntr_entry *entry,
2354 void *context, int vl, int mode, u64 data)
2355{
2356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357
2358 return dd->rcv_err_status_cnt[36];
2359}
2360
2361static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2362 const struct cntr_entry *entry,
2363 void *context, int vl, int mode, u64 data)
2364{
2365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366
2367 return dd->rcv_err_status_cnt[35];
2368}
2369
2370static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2371 const struct cntr_entry *entry,
2372 void *context, int vl, int mode, u64 data)
2373{
2374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375
2376 return dd->rcv_err_status_cnt[34];
2377}
2378
2379static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2380 const struct cntr_entry *entry,
2381 void *context, int vl, int mode, u64 data)
2382{
2383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384
2385 return dd->rcv_err_status_cnt[33];
2386}
2387
2388static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2389 void *context, int vl, int mode,
2390 u64 data)
2391{
2392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393
2394 return dd->rcv_err_status_cnt[32];
2395}
2396
2397static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2398 void *context, int vl, int mode,
2399 u64 data)
2400{
2401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402
2403 return dd->rcv_err_status_cnt[31];
2404}
2405
2406static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2407 void *context, int vl, int mode,
2408 u64 data)
2409{
2410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411
2412 return dd->rcv_err_status_cnt[30];
2413}
2414
2415static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2416 void *context, int vl, int mode,
2417 u64 data)
2418{
2419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420
2421 return dd->rcv_err_status_cnt[29];
2422}
2423
2424static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2425 void *context, int vl,
2426 int mode, u64 data)
2427{
2428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429
2430 return dd->rcv_err_status_cnt[28];
2431}
2432
2433static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2434 const struct cntr_entry *entry,
2435 void *context, int vl, int mode, u64 data)
2436{
2437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438
2439 return dd->rcv_err_status_cnt[27];
2440}
2441
2442static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2443 const struct cntr_entry *entry,
2444 void *context, int vl, int mode, u64 data)
2445{
2446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447
2448 return dd->rcv_err_status_cnt[26];
2449}
2450
2451static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2452 const struct cntr_entry *entry,
2453 void *context, int vl, int mode, u64 data)
2454{
2455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456
2457 return dd->rcv_err_status_cnt[25];
2458}
2459
2460static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2461 const struct cntr_entry *entry,
2462 void *context, int vl, int mode, u64 data)
2463{
2464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465
2466 return dd->rcv_err_status_cnt[24];
2467}
2468
2469static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2470 const struct cntr_entry *entry,
2471 void *context, int vl, int mode, u64 data)
2472{
2473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474
2475 return dd->rcv_err_status_cnt[23];
2476}
2477
2478static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2479 const struct cntr_entry *entry,
2480 void *context, int vl, int mode, u64 data)
2481{
2482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483
2484 return dd->rcv_err_status_cnt[22];
2485}
2486
2487static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2488 const struct cntr_entry *entry,
2489 void *context, int vl, int mode, u64 data)
2490{
2491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492
2493 return dd->rcv_err_status_cnt[21];
2494}
2495
2496static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2497 const struct cntr_entry *entry,
2498 void *context, int vl, int mode, u64 data)
2499{
2500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501
2502 return dd->rcv_err_status_cnt[20];
2503}
2504
2505static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2506 const struct cntr_entry *entry,
2507 void *context, int vl, int mode, u64 data)
2508{
2509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510
2511 return dd->rcv_err_status_cnt[19];
2512}
2513
2514static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2515 void *context, int vl,
2516 int mode, u64 data)
2517{
2518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519
2520 return dd->rcv_err_status_cnt[18];
2521}
2522
2523static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2524 void *context, int vl,
2525 int mode, u64 data)
2526{
2527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528
2529 return dd->rcv_err_status_cnt[17];
2530}
2531
2532static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2533 const struct cntr_entry *entry,
2534 void *context, int vl, int mode, u64 data)
2535{
2536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537
2538 return dd->rcv_err_status_cnt[16];
2539}
2540
2541static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2542 const struct cntr_entry *entry,
2543 void *context, int vl, int mode, u64 data)
2544{
2545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546
2547 return dd->rcv_err_status_cnt[15];
2548}
2549
2550static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2551 void *context, int vl,
2552 int mode, u64 data)
2553{
2554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555
2556 return dd->rcv_err_status_cnt[14];
2557}
2558
2559static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2560 void *context, int vl,
2561 int mode, u64 data)
2562{
2563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564
2565 return dd->rcv_err_status_cnt[13];
2566}
2567
2568static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2569 void *context, int vl, int mode,
2570 u64 data)
2571{
2572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573
2574 return dd->rcv_err_status_cnt[12];
2575}
2576
2577static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2578 void *context, int vl, int mode,
2579 u64 data)
2580{
2581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582
2583 return dd->rcv_err_status_cnt[11];
2584}
2585
2586static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2587 void *context, int vl, int mode,
2588 u64 data)
2589{
2590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591
2592 return dd->rcv_err_status_cnt[10];
2593}
2594
2595static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2596 void *context, int vl, int mode,
2597 u64 data)
2598{
2599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600
2601 return dd->rcv_err_status_cnt[9];
2602}
2603
2604static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2605 void *context, int vl, int mode,
2606 u64 data)
2607{
2608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609
2610 return dd->rcv_err_status_cnt[8];
2611}
2612
2613static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2614 const struct cntr_entry *entry,
2615 void *context, int vl, int mode, u64 data)
2616{
2617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618
2619 return dd->rcv_err_status_cnt[7];
2620}
2621
2622static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2623 const struct cntr_entry *entry,
2624 void *context, int vl, int mode, u64 data)
2625{
2626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627
2628 return dd->rcv_err_status_cnt[6];
2629}
2630
2631static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2632 void *context, int vl, int mode,
2633 u64 data)
2634{
2635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636
2637 return dd->rcv_err_status_cnt[5];
2638}
2639
2640static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2641 void *context, int vl, int mode,
2642 u64 data)
2643{
2644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646 return dd->rcv_err_status_cnt[4];
2647}
2648
2649static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2650 void *context, int vl, int mode,
2651 u64 data)
2652{
2653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655 return dd->rcv_err_status_cnt[3];
2656}
2657
2658static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2659 void *context, int vl, int mode,
2660 u64 data)
2661{
2662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664 return dd->rcv_err_status_cnt[2];
2665}
2666
2667static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2668 void *context, int vl, int mode,
2669 u64 data)
2670{
2671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673 return dd->rcv_err_status_cnt[1];
2674}
2675
2676static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2677 void *context, int vl, int mode,
2678 u64 data)
2679{
2680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682 return dd->rcv_err_status_cnt[0];
2683}
2684
2685/*
2686 * Software counters corresponding to each of the
2687 * error status bits within SendPioErrStatus
2688 */
2689static u64 access_pio_pec_sop_head_parity_err_cnt(
2690 const struct cntr_entry *entry,
2691 void *context, int vl, int mode, u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->send_pio_err_status_cnt[35];
2696}
2697
2698static u64 access_pio_pcc_sop_head_parity_err_cnt(
2699 const struct cntr_entry *entry,
2700 void *context, int vl, int mode, u64 data)
2701{
2702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704 return dd->send_pio_err_status_cnt[34];
2705}
2706
2707static u64 access_pio_last_returned_cnt_parity_err_cnt(
2708 const struct cntr_entry *entry,
2709 void *context, int vl, int mode, u64 data)
2710{
2711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713 return dd->send_pio_err_status_cnt[33];
2714}
2715
2716static u64 access_pio_current_free_cnt_parity_err_cnt(
2717 const struct cntr_entry *entry,
2718 void *context, int vl, int mode, u64 data)
2719{
2720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722 return dd->send_pio_err_status_cnt[32];
2723}
2724
2725static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2726 void *context, int vl, int mode,
2727 u64 data)
2728{
2729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731 return dd->send_pio_err_status_cnt[31];
2732}
2733
2734static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2735 void *context, int vl, int mode,
2736 u64 data)
2737{
2738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740 return dd->send_pio_err_status_cnt[30];
2741}
2742
2743static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2744 void *context, int vl, int mode,
2745 u64 data)
2746{
2747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749 return dd->send_pio_err_status_cnt[29];
2750}
2751
2752static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2753 const struct cntr_entry *entry,
2754 void *context, int vl, int mode, u64 data)
2755{
2756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758 return dd->send_pio_err_status_cnt[28];
2759}
2760
2761static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2762 void *context, int vl, int mode,
2763 u64 data)
2764{
2765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767 return dd->send_pio_err_status_cnt[27];
2768}
2769
2770static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2771 void *context, int vl, int mode,
2772 u64 data)
2773{
2774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776 return dd->send_pio_err_status_cnt[26];
2777}
2778
2779static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2780 void *context, int vl,
2781 int mode, u64 data)
2782{
2783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785 return dd->send_pio_err_status_cnt[25];
2786}
2787
2788static u64 access_pio_block_qw_count_parity_err_cnt(
2789 const struct cntr_entry *entry,
2790 void *context, int vl, int mode, u64 data)
2791{
2792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794 return dd->send_pio_err_status_cnt[24];
2795}
2796
2797static u64 access_pio_write_qw_valid_parity_err_cnt(
2798 const struct cntr_entry *entry,
2799 void *context, int vl, int mode, u64 data)
2800{
2801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803 return dd->send_pio_err_status_cnt[23];
2804}
2805
2806static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2807 void *context, int vl, int mode,
2808 u64 data)
2809{
2810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811
2812 return dd->send_pio_err_status_cnt[22];
2813}
2814
2815static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2816 void *context, int vl,
2817 int mode, u64 data)
2818{
2819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820
2821 return dd->send_pio_err_status_cnt[21];
2822}
2823
2824static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2825 void *context, int vl,
2826 int mode, u64 data)
2827{
2828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829
2830 return dd->send_pio_err_status_cnt[20];
2831}
2832
2833static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2834 void *context, int vl,
2835 int mode, u64 data)
2836{
2837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838
2839 return dd->send_pio_err_status_cnt[19];
2840}
2841
2842static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2843 const struct cntr_entry *entry,
2844 void *context, int vl, int mode, u64 data)
2845{
2846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847
2848 return dd->send_pio_err_status_cnt[18];
2849}
2850
2851static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2852 void *context, int vl, int mode,
2853 u64 data)
2854{
2855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856
2857 return dd->send_pio_err_status_cnt[17];
2858}
2859
2860static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2861 void *context, int vl, int mode,
2862 u64 data)
2863{
2864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865
2866 return dd->send_pio_err_status_cnt[16];
2867}
2868
2869static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2870 const struct cntr_entry *entry,
2871 void *context, int vl, int mode, u64 data)
2872{
2873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874
2875 return dd->send_pio_err_status_cnt[15];
2876}
2877
2878static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2879 const struct cntr_entry *entry,
2880 void *context, int vl, int mode, u64 data)
2881{
2882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883
2884 return dd->send_pio_err_status_cnt[14];
2885}
2886
2887static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2888 const struct cntr_entry *entry,
2889 void *context, int vl, int mode, u64 data)
2890{
2891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892
2893 return dd->send_pio_err_status_cnt[13];
2894}
2895
2896static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2897 const struct cntr_entry *entry,
2898 void *context, int vl, int mode, u64 data)
2899{
2900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901
2902 return dd->send_pio_err_status_cnt[12];
2903}
2904
2905static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2906 const struct cntr_entry *entry,
2907 void *context, int vl, int mode, u64 data)
2908{
2909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910
2911 return dd->send_pio_err_status_cnt[11];
2912}
2913
2914static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2915 const struct cntr_entry *entry,
2916 void *context, int vl, int mode, u64 data)
2917{
2918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919
2920 return dd->send_pio_err_status_cnt[10];
2921}
2922
2923static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2924 const struct cntr_entry *entry,
2925 void *context, int vl, int mode, u64 data)
2926{
2927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928
2929 return dd->send_pio_err_status_cnt[9];
2930}
2931
2932static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2933 const struct cntr_entry *entry,
2934 void *context, int vl, int mode, u64 data)
2935{
2936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937
2938 return dd->send_pio_err_status_cnt[8];
2939}
2940
2941static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2942 const struct cntr_entry *entry,
2943 void *context, int vl, int mode, u64 data)
2944{
2945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946
2947 return dd->send_pio_err_status_cnt[7];
2948}
2949
2950static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2951 void *context, int vl, int mode,
2952 u64 data)
2953{
2954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955
2956 return dd->send_pio_err_status_cnt[6];
2957}
2958
2959static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2960 void *context, int vl, int mode,
2961 u64 data)
2962{
2963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964
2965 return dd->send_pio_err_status_cnt[5];
2966}
2967
2968static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2969 void *context, int vl, int mode,
2970 u64 data)
2971{
2972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974 return dd->send_pio_err_status_cnt[4];
2975}
2976
2977static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2978 void *context, int vl, int mode,
2979 u64 data)
2980{
2981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983 return dd->send_pio_err_status_cnt[3];
2984}
2985
2986static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2987 void *context, int vl, int mode,
2988 u64 data)
2989{
2990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992 return dd->send_pio_err_status_cnt[2];
2993}
2994
2995static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2996 void *context, int vl,
2997 int mode, u64 data)
2998{
2999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001 return dd->send_pio_err_status_cnt[1];
3002}
3003
3004static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3005 void *context, int vl, int mode,
3006 u64 data)
3007{
3008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009
3010 return dd->send_pio_err_status_cnt[0];
3011}
3012
3013/*
3014 * Software counters corresponding to each of the
3015 * error status bits within SendDmaErrStatus
3016 */
3017static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3018 const struct cntr_entry *entry,
3019 void *context, int vl, int mode, u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_dma_err_status_cnt[3];
3024}
3025
3026static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3027 const struct cntr_entry *entry,
3028 void *context, int vl, int mode, u64 data)
3029{
3030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032 return dd->send_dma_err_status_cnt[2];
3033}
3034
3035static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3036 void *context, int vl, int mode,
3037 u64 data)
3038{
3039 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041 return dd->send_dma_err_status_cnt[1];
3042}
3043
3044static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3045 void *context, int vl, int mode,
3046 u64 data)
3047{
3048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050 return dd->send_dma_err_status_cnt[0];
3051}
3052
3053/*
3054 * Software counters corresponding to each of the
3055 * error status bits within SendEgressErrStatus
3056 */
3057static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3058 const struct cntr_entry *entry,
3059 void *context, int vl, int mode, u64 data)
3060{
3061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3062
3063 return dd->send_egress_err_status_cnt[63];
3064}
3065
3066static u64 access_tx_read_sdma_memory_csr_err_cnt(
3067 const struct cntr_entry *entry,
3068 void *context, int vl, int mode, u64 data)
3069{
3070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3071
3072 return dd->send_egress_err_status_cnt[62];
3073}
3074
3075static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3076 void *context, int vl, int mode,
3077 u64 data)
3078{
3079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080
3081 return dd->send_egress_err_status_cnt[61];
3082}
3083
3084static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3085 void *context, int vl,
3086 int mode, u64 data)
3087{
3088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090 return dd->send_egress_err_status_cnt[60];
3091}
3092
3093static u64 access_tx_read_sdma_memory_cor_err_cnt(
3094 const struct cntr_entry *entry,
3095 void *context, int vl, int mode, u64 data)
3096{
3097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099 return dd->send_egress_err_status_cnt[59];
3100}
3101
3102static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3103 void *context, int vl, int mode,
3104 u64 data)
3105{
3106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108 return dd->send_egress_err_status_cnt[58];
3109}
3110
3111static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3112 void *context, int vl, int mode,
3113 u64 data)
3114{
3115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116
3117 return dd->send_egress_err_status_cnt[57];
3118}
3119
3120static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3121 void *context, int vl, int mode,
3122 u64 data)
3123{
3124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125
3126 return dd->send_egress_err_status_cnt[56];
3127}
3128
3129static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3130 void *context, int vl, int mode,
3131 u64 data)
3132{
3133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134
3135 return dd->send_egress_err_status_cnt[55];
3136}
3137
3138static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3139 void *context, int vl, int mode,
3140 u64 data)
3141{
3142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144 return dd->send_egress_err_status_cnt[54];
3145}
3146
3147static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3148 void *context, int vl, int mode,
3149 u64 data)
3150{
3151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153 return dd->send_egress_err_status_cnt[53];
3154}
3155
3156static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3157 void *context, int vl, int mode,
3158 u64 data)
3159{
3160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162 return dd->send_egress_err_status_cnt[52];
3163}
3164
3165static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3166 void *context, int vl, int mode,
3167 u64 data)
3168{
3169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171 return dd->send_egress_err_status_cnt[51];
3172}
3173
3174static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3175 void *context, int vl, int mode,
3176 u64 data)
3177{
3178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179
3180 return dd->send_egress_err_status_cnt[50];
3181}
3182
3183static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3184 void *context, int vl, int mode,
3185 u64 data)
3186{
3187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188
3189 return dd->send_egress_err_status_cnt[49];
3190}
3191
3192static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3193 void *context, int vl, int mode,
3194 u64 data)
3195{
3196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197
3198 return dd->send_egress_err_status_cnt[48];
3199}
3200
3201static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3202 void *context, int vl, int mode,
3203 u64 data)
3204{
3205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206
3207 return dd->send_egress_err_status_cnt[47];
3208}
3209
3210static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3211 void *context, int vl, int mode,
3212 u64 data)
3213{
3214 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215
3216 return dd->send_egress_err_status_cnt[46];
3217}
3218
3219static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3220 void *context, int vl, int mode,
3221 u64 data)
3222{
3223 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224
3225 return dd->send_egress_err_status_cnt[45];
3226}
3227
3228static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl,
3230 int mode, u64 data)
3231{
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234 return dd->send_egress_err_status_cnt[44];
3235}
3236
3237static u64 access_tx_read_sdma_memory_unc_err_cnt(
3238 const struct cntr_entry *entry,
3239 void *context, int vl, int mode, u64 data)
3240{
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243 return dd->send_egress_err_status_cnt[43];
3244}
3245
3246static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl, int mode,
3248 u64 data)
3249{
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252 return dd->send_egress_err_status_cnt[42];
3253}
3254
3255static u64 access_tx_credit_return_parity_err_cnt(
3256 const struct cntr_entry *entry,
3257 void *context, int vl, int mode, u64 data)
3258{
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261 return dd->send_egress_err_status_cnt[41];
3262}
3263
3264static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3265 const struct cntr_entry *entry,
3266 void *context, int vl, int mode, u64 data)
3267{
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270 return dd->send_egress_err_status_cnt[40];
3271}
3272
3273static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3274 const struct cntr_entry *entry,
3275 void *context, int vl, int mode, u64 data)
3276{
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279 return dd->send_egress_err_status_cnt[39];
3280}
3281
3282static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3283 const struct cntr_entry *entry,
3284 void *context, int vl, int mode, u64 data)
3285{
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288 return dd->send_egress_err_status_cnt[38];
3289}
3290
3291static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3292 const struct cntr_entry *entry,
3293 void *context, int vl, int mode, u64 data)
3294{
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297 return dd->send_egress_err_status_cnt[37];
3298}
3299
3300static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3301 const struct cntr_entry *entry,
3302 void *context, int vl, int mode, u64 data)
3303{
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306 return dd->send_egress_err_status_cnt[36];
3307}
3308
3309static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3310 const struct cntr_entry *entry,
3311 void *context, int vl, int mode, u64 data)
3312{
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315 return dd->send_egress_err_status_cnt[35];
3316}
3317
3318static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3319 const struct cntr_entry *entry,
3320 void *context, int vl, int mode, u64 data)
3321{
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324 return dd->send_egress_err_status_cnt[34];
3325}
3326
3327static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3328 const struct cntr_entry *entry,
3329 void *context, int vl, int mode, u64 data)
3330{
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333 return dd->send_egress_err_status_cnt[33];
3334}
3335
3336static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3337 const struct cntr_entry *entry,
3338 void *context, int vl, int mode, u64 data)
3339{
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342 return dd->send_egress_err_status_cnt[32];
3343}
3344
3345static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3348{
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351 return dd->send_egress_err_status_cnt[31];
3352}
3353
3354static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3355 const struct cntr_entry *entry,
3356 void *context, int vl, int mode, u64 data)
3357{
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360 return dd->send_egress_err_status_cnt[30];
3361}
3362
3363static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3366{
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369 return dd->send_egress_err_status_cnt[29];
3370}
3371
3372static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3375{
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378 return dd->send_egress_err_status_cnt[28];
3379}
3380
3381static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3384{
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387 return dd->send_egress_err_status_cnt[27];
3388}
3389
3390static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3393{
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396 return dd->send_egress_err_status_cnt[26];
3397}
3398
3399static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3402{
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405 return dd->send_egress_err_status_cnt[25];
3406}
3407
3408static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3411{
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414 return dd->send_egress_err_status_cnt[24];
3415}
3416
3417static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3420{
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423 return dd->send_egress_err_status_cnt[23];
3424}
3425
3426static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3429{
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432 return dd->send_egress_err_status_cnt[22];
3433}
3434
3435static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3438{
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441 return dd->send_egress_err_status_cnt[21];
3442}
3443
3444static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3447{
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450 return dd->send_egress_err_status_cnt[20];
3451}
3452
3453static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3456{
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459 return dd->send_egress_err_status_cnt[19];
3460}
3461
3462static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3465{
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468 return dd->send_egress_err_status_cnt[18];
3469}
3470
3471static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3474{
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477 return dd->send_egress_err_status_cnt[17];
3478}
3479
3480static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3483{
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486 return dd->send_egress_err_status_cnt[16];
3487}
3488
3489static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3490 void *context, int vl, int mode,
3491 u64 data)
3492{
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495 return dd->send_egress_err_status_cnt[15];
3496}
3497
3498static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3499 void *context, int vl,
3500 int mode, u64 data)
3501{
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504 return dd->send_egress_err_status_cnt[14];
3505}
3506
3507static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3508 void *context, int vl, int mode,
3509 u64 data)
3510{
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513 return dd->send_egress_err_status_cnt[13];
3514}
3515
3516static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3517 void *context, int vl, int mode,
3518 u64 data)
3519{
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522 return dd->send_egress_err_status_cnt[12];
3523}
3524
3525static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3526 const struct cntr_entry *entry,
3527 void *context, int vl, int mode, u64 data)
3528{
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531 return dd->send_egress_err_status_cnt[11];
3532}
3533
3534static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3535 void *context, int vl, int mode,
3536 u64 data)
3537{
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540 return dd->send_egress_err_status_cnt[10];
3541}
3542
3543static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3544 void *context, int vl, int mode,
3545 u64 data)
3546{
3547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549 return dd->send_egress_err_status_cnt[9];
3550}
3551
3552static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3553 const struct cntr_entry *entry,
3554 void *context, int vl, int mode, u64 data)
3555{
3556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558 return dd->send_egress_err_status_cnt[8];
3559}
3560
3561static u64 access_tx_pio_launch_intf_parity_err_cnt(
3562 const struct cntr_entry *entry,
3563 void *context, int vl, int mode, u64 data)
3564{
3565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567 return dd->send_egress_err_status_cnt[7];
3568}
3569
3570static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3571 void *context, int vl, int mode,
3572 u64 data)
3573{
3574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576 return dd->send_egress_err_status_cnt[6];
3577}
3578
3579static u64 access_tx_incorrect_link_state_err_cnt(
3580 const struct cntr_entry *entry,
3581 void *context, int vl, int mode, u64 data)
3582{
3583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585 return dd->send_egress_err_status_cnt[5];
3586}
3587
3588static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3589 void *context, int vl, int mode,
3590 u64 data)
3591{
3592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594 return dd->send_egress_err_status_cnt[4];
3595}
3596
3597static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3598 const struct cntr_entry *entry,
3599 void *context, int vl, int mode, u64 data)
3600{
3601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603 return dd->send_egress_err_status_cnt[3];
3604}
3605
3606static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3607 void *context, int vl, int mode,
3608 u64 data)
3609{
3610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612 return dd->send_egress_err_status_cnt[2];
3613}
3614
3615static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3616 const struct cntr_entry *entry,
3617 void *context, int vl, int mode, u64 data)
3618{
3619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621 return dd->send_egress_err_status_cnt[1];
3622}
3623
3624static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3625 const struct cntr_entry *entry,
3626 void *context, int vl, int mode, u64 data)
3627{
3628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630 return dd->send_egress_err_status_cnt[0];
3631}
3632
3633/*
3634 * Software counters corresponding to each of the
3635 * error status bits within SendErrStatus
3636 */
3637static u64 access_send_csr_write_bad_addr_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_err_status_cnt[2];
3644}
3645
3646static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3647 void *context, int vl,
3648 int mode, u64 data)
3649{
3650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652 return dd->send_err_status_cnt[1];
3653}
3654
3655static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3656 void *context, int vl, int mode,
3657 u64 data)
3658{
3659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661 return dd->send_err_status_cnt[0];
3662}
3663
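/*
 * The three accessors above only report values.  A plausible update
 * path, assuming the SendErrStatus interrupt handler walks the status
 * bits, looks roughly like the sketch below (the helper name and loop
 * bound are illustrative, not the actual handler):
 *
 *	static void count_send_err_bits(struct hfi1_devdata *dd, u64 status)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 3; i++)
 *			if (status & (1ull << i))
 *				dd->send_err_status_cnt[i]++;
 *	}
 *
 * Each access_send_csr_*_cnt() above then simply returns the
 * accumulated count for its bit index.
 */
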
3664/*
3665 * Software counters corresponding to each of the
3666 * error status bits within SendCtxtErrStatus
3667 */
3668static u64 access_pio_write_out_of_bounds_err_cnt(
3669 const struct cntr_entry *entry,
3670 void *context, int vl, int mode, u64 data)
3671{
3672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674 return dd->sw_ctxt_err_status_cnt[4];
3675}
3676
3677static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3678 void *context, int vl, int mode,
3679 u64 data)
3680{
3681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3682
3683 return dd->sw_ctxt_err_status_cnt[3];
3684}
3685
3686static u64 access_pio_write_crosses_boundary_err_cnt(
3687 const struct cntr_entry *entry,
3688 void *context, int vl, int mode, u64 data)
3689{
3690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692 return dd->sw_ctxt_err_status_cnt[2];
3693}
3694
3695static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3696 void *context, int vl,
3697 int mode, u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->sw_ctxt_err_status_cnt[1];
3702}
3703
3704static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3705 void *context, int vl, int mode,
3706 u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->sw_ctxt_err_status_cnt[0];
3711}
3712
3713/*
3714 * Software counters corresponding to each of the
3715 * error status bits within SendDmaEngErrStatus
3716 */
3717static u64 access_sdma_header_request_fifo_cor_err_cnt(
3718 const struct cntr_entry *entry,
3719 void *context, int vl, int mode, u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->sw_send_dma_eng_err_status_cnt[23];
3724}
3725
3726static u64 access_sdma_header_storage_cor_err_cnt(
3727 const struct cntr_entry *entry,
3728 void *context, int vl, int mode, u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->sw_send_dma_eng_err_status_cnt[22];
3733}
3734
3735static u64 access_sdma_packet_tracking_cor_err_cnt(
3736 const struct cntr_entry *entry,
3737 void *context, int vl, int mode, u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->sw_send_dma_eng_err_status_cnt[21];
3742}
3743
3744static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3745 void *context, int vl, int mode,
3746 u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_send_dma_eng_err_status_cnt[20];
3751}
3752
3753static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3754 void *context, int vl, int mode,
3755 u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_send_dma_eng_err_status_cnt[19];
3760}
3761
3762static u64 access_sdma_header_request_fifo_unc_err_cnt(
3763 const struct cntr_entry *entry,
3764 void *context, int vl, int mode, u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_send_dma_eng_err_status_cnt[18];
3769}
3770
3771static u64 access_sdma_header_storage_unc_err_cnt(
3772 const struct cntr_entry *entry,
3773 void *context, int vl, int mode, u64 data)
3774{
3775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777 return dd->sw_send_dma_eng_err_status_cnt[17];
3778}
3779
3780static u64 access_sdma_packet_tracking_unc_err_cnt(
3781 const struct cntr_entry *entry,
3782 void *context, int vl, int mode, u64 data)
3783{
3784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786 return dd->sw_send_dma_eng_err_status_cnt[16];
3787}
3788
3789static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3790 void *context, int vl, int mode,
3791 u64 data)
3792{
3793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795 return dd->sw_send_dma_eng_err_status_cnt[15];
3796}
3797
3798static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3799 void *context, int vl, int mode,
3800 u64 data)
3801{
3802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804 return dd->sw_send_dma_eng_err_status_cnt[14];
3805}
3806
3807static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3808 void *context, int vl, int mode,
3809 u64 data)
3810{
3811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813 return dd->sw_send_dma_eng_err_status_cnt[13];
3814}
3815
3816static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3817 void *context, int vl, int mode,
3818 u64 data)
3819{
3820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822 return dd->sw_send_dma_eng_err_status_cnt[12];
3823}
3824
3825static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3826 void *context, int vl, int mode,
3827 u64 data)
3828{
3829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831 return dd->sw_send_dma_eng_err_status_cnt[11];
3832}
3833
3834static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3835 void *context, int vl, int mode,
3836 u64 data)
3837{
3838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840 return dd->sw_send_dma_eng_err_status_cnt[10];
3841}
3842
3843static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3844 void *context, int vl, int mode,
3845 u64 data)
3846{
3847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849 return dd->sw_send_dma_eng_err_status_cnt[9];
3850}
3851
3852static u64 access_sdma_packet_desc_overflow_err_cnt(
3853 const struct cntr_entry *entry,
3854 void *context, int vl, int mode, u64 data)
3855{
3856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858 return dd->sw_send_dma_eng_err_status_cnt[8];
3859}
3860
3861static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3862 void *context, int vl,
3863 int mode, u64 data)
3864{
3865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867 return dd->sw_send_dma_eng_err_status_cnt[7];
3868}
3869
3870static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3871 void *context, int vl, int mode, u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[6];
3876}
3877
3878static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3879 void *context, int vl, int mode,
3880 u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[5];
3885}
3886
3887static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl, int mode,
3889 u64 data)
3890{
3891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893 return dd->sw_send_dma_eng_err_status_cnt[4];
3894}
3895
3896static u64 access_sdma_tail_out_of_bounds_err_cnt(
3897 const struct cntr_entry *entry,
3898 void *context, int vl, int mode, u64 data)
3899{
3900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902 return dd->sw_send_dma_eng_err_status_cnt[3];
3903}
3904
3905static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3906 void *context, int vl, int mode,
3907 u64 data)
3908{
3909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911 return dd->sw_send_dma_eng_err_status_cnt[2];
3912}
3913
3914static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3915 void *context, int vl, int mode,
3916 u64 data)
3917{
3918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920 return dd->sw_send_dma_eng_err_status_cnt[1];
3921}
3922
3923static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3924 void *context, int vl, int mode,
3925 u64 data)
3926{
3927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929 return dd->sw_send_dma_eng_err_status_cnt[0];
3930}
3931
3932#define def_access_sw_cpu(cntr) \
3933static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3934 void *context, int vl, int mode, u64 data) \
3935{ \
3936 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3937	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3938 ppd->ibport_data.rvp.cntr, vl, \
3939			      mode, data); \
3940}
3941
3942def_access_sw_cpu(rc_acks);
3943def_access_sw_cpu(rc_qacks);
3944def_access_sw_cpu(rc_delayed_comp);
3945
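/*
 * For reference, def_access_sw_cpu(rc_acks) above expands (roughly,
 * modulo line breaks) to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 *
 * so each name handed to the macro becomes a per-CPU counter accessor
 * backed by the rvp fields of ibport_data.
 */
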
3946#define def_access_ibp_counter(cntr) \
3947static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3948 void *context, int vl, int mode, u64 data) \
3949{ \
3950 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3951 \
3952 if (vl != CNTR_INVALID_VL) \
3953 return 0; \
3954 \
3955	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3956			     mode, data); \
3957}
3958
3959def_access_ibp_counter(loop_pkts);
3960def_access_ibp_counter(rc_resends);
3961def_access_ibp_counter(rnr_naks);
3962def_access_ibp_counter(other_naks);
3963def_access_ibp_counter(rc_timeouts);
3964def_access_ibp_counter(pkt_drops);
3965def_access_ibp_counter(dmawait);
3966def_access_ibp_counter(rc_seqnak);
3967def_access_ibp_counter(rc_dupreq);
3968def_access_ibp_counter(rdma_seq);
3969def_access_ibp_counter(unaligned);
3970def_access_ibp_counter(seq_naks);
3971
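/*
 * Similarly, def_access_ibp_counter(loop_pkts) above generates
 * access_ibp_loop_pkts(), which returns 0 when a specific VL is
 * requested and otherwise forwards to:
 *
 *	read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *		      mode, data);
 *
 * The counter tables that follow (dev_cntrs below and, presumably, the
 * per-port table later in this file) plug these generated accessors in
 * as their read/write callbacks.
 */
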
3972static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3973[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3974[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3975 CNTR_NORMAL),
3976[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3977 CNTR_NORMAL),
3978[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3979 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3980 CNTR_NORMAL),
3981[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3982 CNTR_NORMAL),
3983[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3984 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3985[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3986 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3987[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3988 CNTR_NORMAL),
3989[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3990 CNTR_NORMAL),
3991[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3992 CNTR_NORMAL),
3993[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3994 CNTR_NORMAL),
3995[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3996 CNTR_NORMAL),
3997[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3998 CNTR_NORMAL),
3999[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4000 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4001[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4002 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4003[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4004 CNTR_SYNTH),
4005[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4006[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4007 CNTR_SYNTH),
4008[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4009 CNTR_SYNTH),
4010[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4011 CNTR_SYNTH),
4012[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4013 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4014[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4015 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4016 CNTR_SYNTH),
4017[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4018 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4019[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4020 CNTR_SYNTH),
4021[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4022 CNTR_SYNTH),
4023[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4024 CNTR_SYNTH),
4025[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4026 CNTR_SYNTH),
4027[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4028 CNTR_SYNTH),
4029[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4030 CNTR_SYNTH),
4031[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4032 CNTR_SYNTH),
4033[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4034 CNTR_SYNTH | CNTR_VL),
4035[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4036 CNTR_SYNTH | CNTR_VL),
4037[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4038[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4039 CNTR_SYNTH | CNTR_VL),
4040[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4041[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4042 CNTR_SYNTH | CNTR_VL),
4043[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4044 CNTR_SYNTH),
4045[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4046 CNTR_SYNTH | CNTR_VL),
4047[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4048 CNTR_SYNTH),
4049[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4050 CNTR_SYNTH | CNTR_VL),
4051[C_DC_TOTAL_CRC] =
4052 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4053 CNTR_SYNTH),
4054[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4055 CNTR_SYNTH),
4056[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4057 CNTR_SYNTH),
4058[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4059 CNTR_SYNTH),
4060[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4061 CNTR_SYNTH),
4062[C_DC_CRC_MULT_LN] =
4063 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4064 CNTR_SYNTH),
4065[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4066 CNTR_SYNTH),
4067[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4068 CNTR_SYNTH),
4069[C_DC_SEQ_CRC_CNT] =
4070 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4071 CNTR_SYNTH),
4072[C_DC_ESC0_ONLY_CNT] =
4073 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4074 CNTR_SYNTH),
4075[C_DC_ESC0_PLUS1_CNT] =
4076 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4077 CNTR_SYNTH),
4078[C_DC_ESC0_PLUS2_CNT] =
4079 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4080 CNTR_SYNTH),
4081[C_DC_REINIT_FROM_PEER_CNT] =
4082 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4083 CNTR_SYNTH),
4084[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4085 CNTR_SYNTH),
4086[C_DC_MISC_FLG_CNT] =
4087 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4088 CNTR_SYNTH),
4089[C_DC_PRF_GOOD_LTP_CNT] =
4090 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4091[C_DC_PRF_ACCEPTED_LTP_CNT] =
4092 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4093 CNTR_SYNTH),
4094[C_DC_PRF_RX_FLIT_CNT] =
4095 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4096[C_DC_PRF_TX_FLIT_CNT] =
4097 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4098[C_DC_PRF_CLK_CNTR] =
4099 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4100[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4101 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4102[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4103 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4104 CNTR_SYNTH),
4105[C_DC_PG_STS_TX_SBE_CNT] =
4106 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4107[C_DC_PG_STS_TX_MBE_CNT] =
4108 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4109 CNTR_SYNTH),
4110[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4111 access_sw_cpu_intr),
4112[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4113 access_sw_cpu_rcv_limit),
4114[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4115 access_sw_vtx_wait),
4116[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4117 access_sw_pio_wait),
4118[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4119 access_sw_kmem_wait),
4120[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4121 access_sw_send_schedule),
4122[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4123 SEND_DMA_DESC_FETCHED_CNT, 0,
4124 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4125 dev_access_u32_csr),
4126[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4127 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4128 access_sde_int_cnt),
4129[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4130 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4131 access_sde_err_cnt),
4132[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4133 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4134 access_sde_idle_int_cnt),
4135[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4136 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4137 access_sde_progress_int_cnt),
4138/* MISC_ERR_STATUS */
4139[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4140 CNTR_NORMAL,
4141 access_misc_pll_lock_fail_err_cnt),
4142[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4143 CNTR_NORMAL,
4144 access_misc_mbist_fail_err_cnt),
4145[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4146 CNTR_NORMAL,
4147 access_misc_invalid_eep_cmd_err_cnt),
4148[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4149 CNTR_NORMAL,
4150 access_misc_efuse_done_parity_err_cnt),
4151[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4152 CNTR_NORMAL,
4153 access_misc_efuse_write_err_cnt),
4154[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4155 0, CNTR_NORMAL,
4156 access_misc_efuse_read_bad_addr_err_cnt),
4157[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4158 CNTR_NORMAL,
4159 access_misc_efuse_csr_parity_err_cnt),
4160[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4161 CNTR_NORMAL,
4162 access_misc_fw_auth_failed_err_cnt),
4163[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_key_mismatch_err_cnt),
4166[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4167 CNTR_NORMAL,
4168 access_misc_sbus_write_failed_err_cnt),
4169[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_csr_write_bad_addr_err_cnt),
4172[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_csr_read_bad_addr_err_cnt),
4175[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4176 CNTR_NORMAL,
4177 access_misc_csr_parity_err_cnt),
4178/* CceErrStatus */
4179[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4180 CNTR_NORMAL,
4181 access_sw_cce_err_status_aggregated_cnt),
4182[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4183 CNTR_NORMAL,
4184 access_cce_msix_csr_parity_err_cnt),
4185[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4186 CNTR_NORMAL,
4187 access_cce_int_map_unc_err_cnt),
4188[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4189 CNTR_NORMAL,
4190 access_cce_int_map_cor_err_cnt),
4191[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4192 CNTR_NORMAL,
4193 access_cce_msix_table_unc_err_cnt),
4194[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4195 CNTR_NORMAL,
4196 access_cce_msix_table_cor_err_cnt),
4197[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4198 0, CNTR_NORMAL,
4199 access_cce_rxdma_conv_fifo_parity_err_cnt),
4200[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4201 0, CNTR_NORMAL,
4202 access_cce_rcpl_async_fifo_parity_err_cnt),
4203[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4204 CNTR_NORMAL,
4205 access_cce_seg_write_bad_addr_err_cnt),
4206[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_seg_read_bad_addr_err_cnt),
4209[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4210 CNTR_NORMAL,
4211 access_la_triggered_cnt),
4212[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4213 CNTR_NORMAL,
4214 access_cce_trgt_cpl_timeout_err_cnt),
4215[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_pcic_receive_parity_err_cnt),
4218[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4219 CNTR_NORMAL,
4220 access_pcic_transmit_back_parity_err_cnt),
4221[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4222 0, CNTR_NORMAL,
4223 access_pcic_transmit_front_parity_err_cnt),
4224[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4225 CNTR_NORMAL,
4226 access_pcic_cpl_dat_q_unc_err_cnt),
4227[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_pcic_cpl_hd_q_unc_err_cnt),
4230[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4231 CNTR_NORMAL,
4232 access_pcic_post_dat_q_unc_err_cnt),
4233[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4234 CNTR_NORMAL,
4235 access_pcic_post_hd_q_unc_err_cnt),
4236[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_pcic_retry_sot_mem_unc_err_cnt),
4239[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_retry_mem_unc_err),
4242[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_pcic_n_post_dat_q_parity_err_cnt),
4245[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_pcic_n_post_h_q_parity_err_cnt),
4248[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_cpl_dat_q_cor_err_cnt),
4251[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_cpl_hd_q_cor_err_cnt),
4254[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_post_dat_q_cor_err_cnt),
4257[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_post_hd_q_cor_err_cnt),
4260[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_retry_sot_mem_cor_err_cnt),
4263[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_retry_mem_cor_err_cnt),
4266[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4267 "CceCli1AsyncFifoDbgParityError", 0, 0,
4268 CNTR_NORMAL,
4269 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4270[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4271 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4272 CNTR_NORMAL,
4273 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4274 ),
4275[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4276 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4279[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4280 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4283[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4284 0, CNTR_NORMAL,
4285 access_cce_cli2_async_fifo_parity_err_cnt),
4286[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_csr_cfg_bus_parity_err_cnt),
4289[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4290 0, CNTR_NORMAL,
4291 access_cce_cli0_async_fifo_parity_err_cnt),
4292[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4293 CNTR_NORMAL,
4294 access_cce_rspd_data_parity_err_cnt),
4295[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_trgt_access_err_cnt),
4298[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4299 0, CNTR_NORMAL,
4300 access_cce_trgt_async_fifo_parity_err_cnt),
4301[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_cce_csr_write_bad_addr_err_cnt),
4304[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_cce_csr_read_bad_addr_err_cnt),
4307[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_ccs_csr_parity_err_cnt),
4310
4311/* RcvErrStatus */
4312[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_rx_csr_parity_err_cnt),
4315[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_rx_csr_write_bad_addr_err_cnt),
4318[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4319 CNTR_NORMAL,
4320 access_rx_csr_read_bad_addr_err_cnt),
4321[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4322 CNTR_NORMAL,
4323 access_rx_dma_csr_unc_err_cnt),
4324[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_rx_dma_dq_fsm_encoding_err_cnt),
4327[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_rx_dma_eq_fsm_encoding_err_cnt),
4330[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_rx_dma_csr_parity_err_cnt),
4333[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_rbuf_data_cor_err_cnt),
4336[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_rbuf_data_unc_err_cnt),
4339[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_dma_data_fifo_rd_cor_err_cnt),
4342[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_dma_data_fifo_rd_unc_err_cnt),
4345[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4348[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4351[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_rbuf_desc_part2_cor_err_cnt),
4354[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_rbuf_desc_part2_unc_err_cnt),
4357[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_rbuf_desc_part1_cor_err_cnt),
4360[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_rbuf_desc_part1_unc_err_cnt),
4363[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_hq_intr_fsm_err_cnt),
4366[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_hq_intr_csr_parity_err_cnt),
4369[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_lookup_csr_parity_err_cnt),
4372[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_lookup_rcv_array_cor_err_cnt),
4375[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_lookup_rcv_array_unc_err_cnt),
4378[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4379 0, CNTR_NORMAL,
4380 access_rx_lookup_des_part2_parity_err_cnt),
4381[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4382 0, CNTR_NORMAL,
4383 access_rx_lookup_des_part1_unc_cor_err_cnt),
4384[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_lookup_des_part1_unc_err_cnt),
4387[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_rbuf_next_free_buf_cor_err_cnt),
4390[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_rbuf_next_free_buf_unc_err_cnt),
4393[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4394 "RxRbufFlInitWrAddrParityErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4397[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4398 0, CNTR_NORMAL,
4399 access_rx_rbuf_fl_initdone_parity_err_cnt),
4400[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4401 0, CNTR_NORMAL,
4402 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4403[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4406[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_empty_err_cnt),
4409[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_full_err_cnt),
4412[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4413 CNTR_NORMAL,
4414 access_rbuf_bad_lookup_err_cnt),
4415[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rbuf_ctx_id_parity_err_cnt),
4418[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rbuf_csr_qeopdw_parity_err_cnt),
4421[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4422 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4425[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4426 "RxRbufCsrQTlPtrParityErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4429[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4430 0, CNTR_NORMAL,
4431 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4432[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4433 0, CNTR_NORMAL,
4434 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4435[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4436 0, 0, CNTR_NORMAL,
4437 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4438[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4439 0, CNTR_NORMAL,
4440 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4441[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4442 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4445[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4446 0, CNTR_NORMAL,
4447 access_rx_rbuf_block_list_read_cor_err_cnt),
4448[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_block_list_read_unc_err_cnt),
4451[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4452 CNTR_NORMAL,
4453 access_rx_rbuf_lookup_des_cor_err_cnt),
4454[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_rbuf_lookup_des_unc_err_cnt),
4457[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4458 "RxRbufLookupDesRegUncCorErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4461[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4464[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rx_rbuf_free_list_cor_err_cnt),
4467[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rx_rbuf_free_list_unc_err_cnt),
4470[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rcv_fsm_encoding_err_cnt),
4473[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_dma_flag_cor_err_cnt),
4476[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_rx_dma_flag_unc_err_cnt),
4479[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_dc_sop_eop_parity_err_cnt),
4482[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rcv_csr_parity_err_cnt),
4485[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rcv_qp_map_table_cor_err_cnt),
4488[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rcv_qp_map_table_unc_err_cnt),
4491[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rcv_data_cor_err_cnt),
4494[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_rcv_data_unc_err_cnt),
4497[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_rcv_hdr_cor_err_cnt),
4500[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_rcv_hdr_unc_err_cnt),
4503[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_dc_intf_parity_err_cnt),
4506[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_dma_csr_cor_err_cnt),
4509/* SendPioErrStatus */
4510[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_pio_pec_sop_head_parity_err_cnt),
4513[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_pio_pcc_sop_head_parity_err_cnt),
4516[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4517 0, 0, CNTR_NORMAL,
4518 access_pio_last_returned_cnt_parity_err_cnt),
4519[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4520 0, CNTR_NORMAL,
4521 access_pio_current_free_cnt_parity_err_cnt),
4522[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4523 CNTR_NORMAL,
4524 access_pio_reserved_31_err_cnt),
4525[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4526 CNTR_NORMAL,
4527 access_pio_reserved_30_err_cnt),
4528[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4529 CNTR_NORMAL,
4530 access_pio_ppmc_sop_len_err_cnt),
4531[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_pio_ppmc_bqc_mem_parity_err_cnt),
4534[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_vl_fifo_parity_err_cnt),
4537[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_pio_vlf_sop_parity_err_cnt),
4540[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_pio_vlf_v1_len_parity_err_cnt),
4543[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_pio_block_qw_count_parity_err_cnt),
4546[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_write_qw_valid_parity_err_cnt),
4549[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_state_machine_err_cnt),
4552[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_write_data_parity_err_cnt),
4555[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_host_addr_mem_cor_err_cnt),
4558[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_host_addr_mem_unc_err_cnt),
4561[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4564[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_init_sm_in_err_cnt),
4567[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_ppmc_pbl_fifo_err_cnt),
4570[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4571 0, CNTR_NORMAL,
4572 access_pio_credit_ret_fifo_parity_err_cnt),
4573[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_v1_len_mem_bank1_cor_err_cnt),
4576[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_v1_len_mem_bank0_cor_err_cnt),
4579[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_v1_len_mem_bank1_unc_err_cnt),
4582[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_v1_len_mem_bank0_unc_err_cnt),
4585[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_sm_pkt_reset_parity_err_cnt),
4588[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_pkt_evict_fifo_parity_err_cnt),
4591[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4592 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4595[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_sbrdctl_crrel_parity_err_cnt),
4598[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_pec_fifo_parity_err_cnt),
4601[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_pcc_fifo_parity_err_cnt),
4604[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sb_mem_fifo1_err_cnt),
4607[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_sb_mem_fifo0_err_cnt),
4610[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_pio_csr_parity_err_cnt),
4613[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_write_addr_parity_err_cnt),
4616[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_write_bad_ctxt_err_cnt),
4619/* SendDmaErrStatus */
4620[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4621 0, CNTR_NORMAL,
4622 access_sdma_pcie_req_tracking_cor_err_cnt),
4623[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4624 0, CNTR_NORMAL,
4625 access_sdma_pcie_req_tracking_unc_err_cnt),
4626[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_sdma_csr_parity_err_cnt),
4629[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_sdma_rpy_tag_err_cnt),
4632/* SendEgressErrStatus */
4633[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_tx_read_pio_memory_csr_unc_err_cnt),
4636[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4637 0, CNTR_NORMAL,
4638 access_tx_read_sdma_memory_csr_err_cnt),
4639[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_tx_egress_fifo_cor_err_cnt),
4642[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_tx_read_pio_memory_cor_err_cnt),
4645[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_tx_read_sdma_memory_cor_err_cnt),
4648[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_tx_sb_hdr_cor_err_cnt),
4651[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4652 CNTR_NORMAL,
4653 access_tx_credit_overrun_err_cnt),
4654[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4655 CNTR_NORMAL,
4656 access_tx_launch_fifo8_cor_err_cnt),
4657[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_tx_launch_fifo7_cor_err_cnt),
4660[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_tx_launch_fifo6_cor_err_cnt),
4663[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_launch_fifo5_cor_err_cnt),
4666[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_launch_fifo4_cor_err_cnt),
4669[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_launch_fifo3_cor_err_cnt),
4672[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_launch_fifo2_cor_err_cnt),
4675[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_launch_fifo1_cor_err_cnt),
4678[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo0_cor_err_cnt),
4681[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_credit_return_vl_err_cnt),
4684[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_hcrc_insertion_err_cnt),
4687[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_egress_fifo_unc_err_cnt),
4690[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_read_pio_memory_unc_err_cnt),
4693[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_read_sdma_memory_unc_err_cnt),
4696[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_sb_hdr_unc_err_cnt),
4699[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4700 CNTR_NORMAL,
4701				       access_tx_credit_return_parity_err_cnt),
4702[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4703 0, 0, CNTR_NORMAL,
4704 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4705[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4706 0, 0, CNTR_NORMAL,
4707 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4708[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4709 0, 0, CNTR_NORMAL,
4710 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4711[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4712 0, 0, CNTR_NORMAL,
4713 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4714[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4715 0, 0, CNTR_NORMAL,
4716 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4717[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4718 0, 0, CNTR_NORMAL,
4719 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4720[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4721 0, 0, CNTR_NORMAL,
4722 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4723[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4726[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4729[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_sdma15_disallowed_packet_err_cnt),
4732[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_sdma14_disallowed_packet_err_cnt),
4735[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_sdma13_disallowed_packet_err_cnt),
4738[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_sdma12_disallowed_packet_err_cnt),
4741[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_sdma11_disallowed_packet_err_cnt),
4744[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_sdma10_disallowed_packet_err_cnt),
4747[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_sdma9_disallowed_packet_err_cnt),
4750[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_sdma8_disallowed_packet_err_cnt),
4753[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma7_disallowed_packet_err_cnt),
4756[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma6_disallowed_packet_err_cnt),
4759[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma5_disallowed_packet_err_cnt),
4762[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma4_disallowed_packet_err_cnt),
4765[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma3_disallowed_packet_err_cnt),
4768[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma2_disallowed_packet_err_cnt),
4771[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma1_disallowed_packet_err_cnt),
4774[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma0_disallowed_packet_err_cnt),
4777[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4778 CNTR_NORMAL,
4779 access_tx_config_parity_err_cnt),
4780[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4781 CNTR_NORMAL,
4782 access_tx_sbrd_ctl_csr_parity_err_cnt),
4783[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4784 CNTR_NORMAL,
4785 access_tx_launch_csr_parity_err_cnt),
4786[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4787 CNTR_NORMAL,
4788 access_tx_illegal_vl_err_cnt),
4789[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4790 "TxSbrdCtlStateMachineParityErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4793[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4794 CNTR_NORMAL,
4795 access_egress_reserved_10_err_cnt),
4796[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4797 CNTR_NORMAL,
4798 access_egress_reserved_9_err_cnt),
4799[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4800 0, 0, CNTR_NORMAL,
4801 access_tx_sdma_launch_intf_parity_err_cnt),
4802[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_pio_launch_intf_parity_err_cnt),
4805[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4806 CNTR_NORMAL,
4807 access_egress_reserved_6_err_cnt),
4808[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4809 CNTR_NORMAL,
4810 access_tx_incorrect_link_state_err_cnt),
4811[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_linkdown_err_cnt),
4814[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4815 "EgressFifoUnderrunOrParityErr", 0, 0,
4816 CNTR_NORMAL,
4817 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4818[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4819 CNTR_NORMAL,
4820 access_egress_reserved_2_err_cnt),
4821[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_pkt_integrity_mem_unc_err_cnt),
4824[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4825 CNTR_NORMAL,
4826 access_tx_pkt_integrity_mem_cor_err_cnt),
4827/* SendErrStatus */
4828[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_send_csr_write_bad_addr_err_cnt),
4831[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4832 CNTR_NORMAL,
4833 access_send_csr_read_bad_addr_err_cnt),
4834[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_send_csr_parity_cnt),
4837/* SendCtxtErrStatus */
4838[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4839 CNTR_NORMAL,
4840 access_pio_write_out_of_bounds_err_cnt),
4841[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_pio_write_overflow_err_cnt),
4844[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4845 0, 0, CNTR_NORMAL,
4846 access_pio_write_crosses_boundary_err_cnt),
4847[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_pio_disallowed_packet_err_cnt),
4850[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_pio_inconsistent_sop_err_cnt),
4853/* SendDmaEngErrStatus */
4854[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4855 0, 0, CNTR_NORMAL,
4856 access_sdma_header_request_fifo_cor_err_cnt),
4857[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_sdma_header_storage_cor_err_cnt),
4860[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_sdma_packet_tracking_cor_err_cnt),
4863[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4864 CNTR_NORMAL,
4865 access_sdma_assembly_cor_err_cnt),
4866[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_sdma_desc_table_cor_err_cnt),
4869[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4870 0, 0, CNTR_NORMAL,
4871 access_sdma_header_request_fifo_unc_err_cnt),
4872[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4873 CNTR_NORMAL,
4874 access_sdma_header_storage_unc_err_cnt),
4875[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4876 CNTR_NORMAL,
4877 access_sdma_packet_tracking_unc_err_cnt),
4878[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_sdma_assembly_unc_err_cnt),
4881[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_sdma_desc_table_unc_err_cnt),
4884[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_timeout_err_cnt),
4887[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_header_length_err_cnt),
4890[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4891 CNTR_NORMAL,
4892 access_sdma_header_address_err_cnt),
4893[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_sdma_header_select_err_cnt),
4896[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_reserved_9_err_cnt),
4899[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_packet_desc_overflow_err_cnt),
4902[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_length_mismatch_err_cnt),
4905[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_halt_err_cnt),
4908[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_mem_read_err_cnt),
4911[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_first_desc_err_cnt),
4914[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_tail_out_of_bounds_err_cnt),
4917[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_too_long_err_cnt),
4920[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_gen_mismatch_err_cnt),
4923[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004926};
4927
4928static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4929[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4930 CNTR_NORMAL),
4931[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4932 CNTR_NORMAL),
4933[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4934 CNTR_NORMAL),
4935[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4936 CNTR_NORMAL),
4937[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4938 CNTR_NORMAL),
4939[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4940 CNTR_NORMAL),
4941[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4942 CNTR_NORMAL),
4943[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4944[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4945[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4946[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4947 CNTR_SYNTH | CNTR_VL),
4948[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4949 CNTR_SYNTH | CNTR_VL),
4950[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4951 CNTR_SYNTH | CNTR_VL),
4952[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4953[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4954[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4955 access_sw_link_dn_cnt),
4956[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4957 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004958[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4959 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004960[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4961 access_sw_xmit_discards),
4962[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4963 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4964 access_sw_xmit_discards),
4965[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4966 access_xmit_constraint_errs),
4967[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4968 access_rcv_constraint_errs),
4969[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4970[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4971[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4972[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4973[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4974[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4975[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4976[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4977[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4978[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4979[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4980[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4981[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4982 access_sw_cpu_rc_acks),
4983[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4984 access_sw_cpu_rc_qacks),
4985[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4986 access_sw_cpu_rc_delayed_comp),
4987[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4988[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4989[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4990[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4991[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4992[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4993[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4994[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4995[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4996[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4997[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4998[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4999[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5000[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5001[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5002[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5003[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5004[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5005[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5006[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5007[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5008[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5009[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5010[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5011[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5012[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5013[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5014[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5015[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5016[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5017[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5018[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5019[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5020[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5021[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5022[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5023[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5024[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5025[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5026[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5027[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5028[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5029[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5030[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5031[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5032[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5033[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5034[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5035[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5036[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5037[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5038[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5039[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5040[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5041[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5042[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5043[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5044[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5045[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5046[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5047[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5048[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5049[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5050[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5051[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5052[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5053[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5054[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5055[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5056[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5057[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5058[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5059[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5060[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5061[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5062[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5063[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5064[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5065[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5066[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5067};
5068
5069/* ======================================================================== */
5070
Mike Marciniszyn77241052015-07-30 15:17:43 -04005071/* return true if this is chip revision a */
5072int is_ax(struct hfi1_devdata *dd)
5073{
5074 u8 chip_rev_minor =
5075 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5076 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5077 return (chip_rev_minor & 0xf0) == 0;
5078}
5079
5080/* return true if this is chip revision b */
5081int is_bx(struct hfi1_devdata *dd)
5082{
5083 u8 chip_rev_minor =
5084 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5085 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005086 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005087}
5088
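/*
 * Both checks above look only at the upper nibble of CHIP_REV_MINOR:
 * 0x0n is treated as an A-step part and 0x1n as a B-step part, with
 * the lower nibble left for sub-steppings.  For example, a minor
 * revision value of 0x12 satisfies is_bx() because
 * (0x12 & 0xF0) == 0x10.  (Example value chosen for illustration.)
 */
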
5089/*
5090 * Append string s to buffer buf. Arguments curp and lenp point to the
5091 * current position and remaining length, respectively.
5092 *
5093 * return 0 on success, 1 on out of room
5094 */
5095static int append_str(char *buf, char **curp, int *lenp, const char *s)
5096{
5097 char *p = *curp;
5098 int len = *lenp;
5099 int result = 0; /* success */
5100 char c;
5101
5102	/* add a comma, unless this is the first entry in the buffer */
5103 if (p != buf) {
5104 if (len == 0) {
5105 result = 1; /* out of room */
5106 goto done;
5107 }
5108 *p++ = ',';
5109 len--;
5110 }
5111
5112 /* copy the string */
5113 while ((c = *s++) != 0) {
5114 if (len == 0) {
5115 result = 1; /* out of room */
5116 goto done;
5117 }
5118 *p++ = c;
5119 len--;
5120 }
5121
5122done:
5123 /* write return values */
5124 *curp = p;
5125 *lenp = len;
5126
5127 return result;
5128}
5129
5130/*
5131 * Using the given flag table, print a comma separated string into
5132 * the buffer. End in '*' if the buffer is too short.
5133 */
5134static char *flag_string(char *buf, int buf_len, u64 flags,
5135 struct flag_table *table, int table_size)
5136{
5137 char extra[32];
5138 char *p = buf;
5139 int len = buf_len;
5140 int no_room = 0;
5141 int i;
5142
5143	/* make sure there are at least 2 bytes so we can form "*" */
5144 if (len < 2)
5145 return "";
5146
5147 len--; /* leave room for a nul */
5148 for (i = 0; i < table_size; i++) {
5149 if (flags & table[i].flag) {
5150 no_room = append_str(buf, &p, &len, table[i].str);
5151 if (no_room)
5152 break;
5153 flags &= ~table[i].flag;
5154 }
5155 }
5156
5157 /* any undocumented bits left? */
5158 if (!no_room && flags) {
5159 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5160 no_room = append_str(buf, &p, &len, extra);
5161 }
5162
5163	/* add '*' if we ran out of room */
5164 if (no_room) {
5165 /* may need to back up to add space for a '*' */
5166 if (len == 0)
5167 --p;
5168 *p++ = '*';
5169 }
5170
5171 /* add final nul - space already allocated above */
5172 *p = 0;
5173 return buf;
5174}
5175
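/*
 * Illustrative flag_string() output (made-up table entries, not a real
 * register layout): with a table of { 0x1, "FooErr" } and
 * { 0x4, "BarErr" }, a flags value of 0x7 prints as
 * "FooErr,BarErr,bits 0x2"; if the buffer is too short, the string is
 * truncated and ends in '*', e.g. "FooErr,Ba*".
 */
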
5176/* first 8 CCE error interrupt source names */
5177static const char * const cce_misc_names[] = {
5178 "CceErrInt", /* 0 */
5179 "RxeErrInt", /* 1 */
5180 "MiscErrInt", /* 2 */
5181 "Reserved3", /* 3 */
5182 "PioErrInt", /* 4 */
5183 "SDmaErrInt", /* 5 */
5184 "EgressErrInt", /* 6 */
5185 "TxeErrInt" /* 7 */
5186};
5187
5188/*
5189 * Return the miscellaneous error interrupt name.
5190 */
5191static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5192{
5193 if (source < ARRAY_SIZE(cce_misc_names))
5194 strncpy(buf, cce_misc_names[source], bsize);
5195 else
5196 snprintf(buf,
5197 bsize,
5198 "Reserved%u",
5199 source + IS_GENERAL_ERR_START);
5200
5201 return buf;
5202}
5203
5204/*
5205 * Return the SDMA engine error interrupt name.
5206 */
5207static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5208{
5209 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5210 return buf;
5211}
5212
5213/*
5214 * Return the send context error interrupt name.
5215 */
5216static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5217{
5218 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5219 return buf;
5220}
5221
5222static const char * const various_names[] = {
5223 "PbcInt",
5224 "GpioAssertInt",
5225 "Qsfp1Int",
5226 "Qsfp2Int",
5227 "TCritInt"
5228};
5229
5230/*
5231 * Return the various interrupt name.
5232 */
5233static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5234{
5235 if (source < ARRAY_SIZE(various_names))
5236 strncpy(buf, various_names[source], bsize);
5237 else
5238 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5239 return buf;
5240}
5241
5242/*
5243 * Return the DC interrupt name.
5244 */
5245static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5246{
5247 static const char * const dc_int_names[] = {
5248 "common",
5249 "lcb",
5250 "8051",
5251 "lbm" /* local block merge */
5252 };
5253
5254 if (source < ARRAY_SIZE(dc_int_names))
5255 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5256 else
5257 snprintf(buf, bsize, "DCInt%u", source);
5258 return buf;
5259}
5260
5261static const char * const sdma_int_names[] = {
5262 "SDmaInt",
5263 "SdmaIdleInt",
5264 "SdmaProgressInt",
5265};
5266
5267/*
5268 * Return the SDMA engine interrupt name.
5269 */
5270static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5271{
5272 /* what interrupt */
5273 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5274 /* which engine */
5275 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5276
5277 if (likely(what < 3))
5278 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5279 else
5280 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5281 return buf;
5282}
5283
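/*
 * The per-engine SDMA sources are grouped by interrupt type: with 16
 * engines, sources 0-15 are SDmaInt, 16-31 are SdmaIdleInt and 32-47
 * are SdmaProgressInt.  For example, source 17 decodes as
 * what = 17 / 16 = 1 and which = 17 % 16 = 1, i.e. "SdmaIdleInt1".
 */
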
5284/*
5285 * Return the receive available interrupt name.
5286 */
5287static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5288{
5289 snprintf(buf, bsize, "RcvAvailInt%u", source);
5290 return buf;
5291}
5292
5293/*
5294 * Return the receive urgent interrupt name.
5295 */
5296static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5297{
5298 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5299 return buf;
5300}
5301
5302/*
5303 * Return the send credit interrupt name.
5304 */
5305static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5306{
5307 snprintf(buf, bsize, "SendCreditInt%u", source);
5308 return buf;
5309}
5310
5311/*
5312 * Return the reserved interrupt name.
5313 */
5314static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5317 return buf;
5318}
5319
5320static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5321{
5322 return flag_string(buf, buf_len, flags,
5323 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5324}
5325
5326static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5327{
5328 return flag_string(buf, buf_len, flags,
5329 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5330}
5331
5332static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5333{
5334 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5335 ARRAY_SIZE(misc_err_status_flags));
5336}
5337
5338static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5339{
5340 return flag_string(buf, buf_len, flags,
5341 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5342}
5343
5344static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5345{
5346 return flag_string(buf, buf_len, flags,
5347 sdma_err_status_flags,
5348 ARRAY_SIZE(sdma_err_status_flags));
5349}
5350
5351static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5352{
5353 return flag_string(buf, buf_len, flags,
5354 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5355}
5356
5357static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5358{
5359 return flag_string(buf, buf_len, flags,
5360 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5361}
5362
5363static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5364{
5365 return flag_string(buf, buf_len, flags,
5366 send_err_status_flags,
5367 ARRAY_SIZE(send_err_status_flags));
5368}
5369
5370static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5371{
5372 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005373 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005374
5375 /*
5376	 * For most of these errors, there is nothing that can be done except
5377	 * report or record them.
5378 */
5379 dd_dev_info(dd, "CCE Error: %s\n",
5380 cce_err_status_string(buf, sizeof(buf), reg));
5381
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005382 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5383 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005384 /* this error requires a manual drop into SPC freeze mode */
5385 /* then a fix up */
5386 start_freeze_handling(dd->pport, FREEZE_SELF);
5387 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005388
5389 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5390 if (reg & (1ull << i)) {
5391 incr_cntr64(&dd->cce_err_status_cnt[i]);
5392 /* maintain a counter over all cce_err_status errors */
5393 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5394 }
5395 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005396}
5397
5398/*
5399 * Check counters for receive errors that do not have an interrupt
5400 * associated with them.
5401 */
5402#define RCVERR_CHECK_TIME 10
5403static void update_rcverr_timer(unsigned long opaque)
5404{
5405 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5406 struct hfi1_pportdata *ppd = dd->pport;
5407 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5408
5409 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5410 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5411 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5412 set_link_down_reason(ppd,
5413 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5414 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5415 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5416 }
5417 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5418
5419 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5420}
5421
5422static int init_rcverr(struct hfi1_devdata *dd)
5423{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305424 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005425 /* Assume the hardware counter has been reset */
5426 dd->rcv_ovfl_cnt = 0;
5427 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5428}
5429
5430static void free_rcverr(struct hfi1_devdata *dd)
5431{
5432 if (dd->rcverr_timer.data)
5433 del_timer_sync(&dd->rcverr_timer);
5434 dd->rcverr_timer.data = 0;
5435}
5436
5437static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5438{
5439 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005440 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005441
5442 dd_dev_info(dd, "Receive Error: %s\n",
5443 rxe_err_status_string(buf, sizeof(buf), reg));
5444
5445 if (reg & ALL_RXE_FREEZE_ERR) {
5446 int flags = 0;
5447
5448 /*
5449 * Freeze mode recovery is disabled for the errors
5450 * in RXE_FREEZE_ABORT_MASK
5451 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005452 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005453 flags = FREEZE_ABORT;
5454
5455 start_freeze_handling(dd->pport, flags);
5456 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005457
5458 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5459 if (reg & (1ull << i))
5460 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5461 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005462}
5463
5464static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5465{
5466 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005467 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005468
5469 dd_dev_info(dd, "Misc Error: %s",
5470 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005471 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5472 if (reg & (1ull << i))
5473 incr_cntr64(&dd->misc_err_status_cnt[i]);
5474 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005475}
5476
5477static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5478{
5479 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005480 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005481
5482 dd_dev_info(dd, "PIO Error: %s\n",
5483 pio_err_status_string(buf, sizeof(buf), reg));
5484
5485 if (reg & ALL_PIO_FREEZE_ERR)
5486 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005487
5488 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5489 if (reg & (1ull << i))
5490 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5491 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005492}
5493
5494static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5495{
5496 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005497 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005498
5499 dd_dev_info(dd, "SDMA Error: %s\n",
5500 sdma_err_status_string(buf, sizeof(buf), reg));
5501
5502 if (reg & ALL_SDMA_FREEZE_ERR)
5503 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005504
5505 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5506 if (reg & (1ull << i))
5507 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5508 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005509}
5510
5511static void count_port_inactive(struct hfi1_devdata *dd)
5512{
5513 struct hfi1_pportdata *ppd = dd->pport;
5514
5515 if (ppd->port_xmit_discards < ~(u64)0)
5516 ppd->port_xmit_discards++;
5517}
5518
5519/*
5520 * We have had a "disallowed packet" error during egress. Determine the
5521 * integrity check which failed, and update relevant error counter, etc.
5522 *
5523 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5524 * bit of state per integrity check, and so we can miss the reason for an
5525 * egress error if more than one packet fails the same integrity check
5526 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5527 */
5528static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5529{
5530 struct hfi1_pportdata *ppd = dd->pport;
5531 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5532 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5533 char buf[96];
5534
5535 /* clear down all observed info as quickly as possible after read */
5536 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5537
5538 dd_dev_info(dd,
5539 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5540 info, egress_err_info_string(buf, sizeof(buf), info), src);
5541
5542 /* Eventually add other counters for each bit */
5543
5544 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5545 if (ppd->port_xmit_discards < ~(u64)0)
5546 ppd->port_xmit_discards++;
5547 }
5548}
5549
5550/*
5551 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5552 * register. Does it represent a 'port inactive' error?
5553 */
5554static inline int port_inactive_err(u64 posn)
5555{
5556 return (posn >= SEES(TX_LINKDOWN) &&
5557 posn <= SEES(TX_INCORRECT_LINK_STATE));
5558}
5559
5560/*
5561 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5562 * register. Does it represent a 'disallowed packet' error?
5563 */
5564static inline int disallowed_pkt_err(u64 posn)
5565{
5566 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5567 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5568}
5569
5570static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5571{
5572 u64 reg_copy = reg, handled = 0;
5573 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005574 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005575
5576 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5577 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005578 if (is_ax(dd) && (reg &
Mike Marciniszyn77241052015-07-30 15:17:43 -04005579 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5580 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5581 start_freeze_handling(dd->pport, 0);
5582
5583 while (reg_copy) {
5584 int posn = fls64(reg_copy);
5585 /*
5586 * fls64() returns a 1-based offset, but we generally
5587 * want 0-based offsets.
5588 */
5589 int shift = posn - 1;
5590
5591 if (port_inactive_err(shift)) {
5592 count_port_inactive(dd);
5593 handled |= (1ULL << shift);
5594 } else if (disallowed_pkt_err(shift)) {
5595 handle_send_egress_err_info(dd);
5596 handled |= (1ULL << shift);
5597 }
5598 clear_bit(shift, (unsigned long *)&reg_copy);
5599 }
5600
5601 reg &= ~handled;
5602
5603 if (reg)
5604 dd_dev_info(dd, "Egress Error: %s\n",
5605 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005606
5607 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5608 if (reg & (1ull << i))
5609 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5610 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005611}
5612
5613static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5614{
5615 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005616 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005617
5618 dd_dev_info(dd, "Send Error: %s\n",
5619 send_err_status_string(buf, sizeof(buf), reg));
5620
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005621 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5622 if (reg & (1ull << i))
5623 incr_cntr64(&dd->send_err_status_cnt[i]);
5624 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005625}
5626
5627/*
5628 * The maximum number of times the error clear down will loop before
5629 * blocking a repeating error. This value is arbitrary.
5630 */
5631#define MAX_CLEAR_COUNT 20
5632
5633/*
5634 * Clear and handle an error register. All error interrupts are funneled
5635 * through here to have a central location to correctly handle single-
5636 * or multi-shot errors.
5637 *
5638 * For non per-context registers, call this routine with a context value
5639 * of 0 so the per-context offset is zero.
5640 *
5641 * If the handler loops too many times, assume that something is wrong
5642 * and can't be fixed, so mask the error bits.
5643 */
5644static void interrupt_clear_down(struct hfi1_devdata *dd,
5645 u32 context,
5646 const struct err_reg_info *eri)
5647{
5648 u64 reg;
5649 u32 count;
5650
5651 /* read in a loop until no more errors are seen */
5652 count = 0;
5653 while (1) {
5654 reg = read_kctxt_csr(dd, context, eri->status);
5655 if (reg == 0)
5656 break;
5657 write_kctxt_csr(dd, context, eri->clear, reg);
5658 if (likely(eri->handler))
5659 eri->handler(dd, context, reg);
5660 count++;
5661 if (count > MAX_CLEAR_COUNT) {
5662 u64 mask;
5663
5664 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5665 eri->desc, reg);
5666 /*
5667 * Read-modify-write so any other masked bits
5668 * remain masked.
5669 */
5670 mask = read_kctxt_csr(dd, context, eri->mask);
5671 mask &= ~reg;
5672 write_kctxt_csr(dd, context, eri->mask, mask);
5673 break;
5674 }
5675 }
5676}
5677
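/*
 * Worked example of the masking above (values chosen purely for
 * illustration): if the status register keeps returning a single
 * repeating error bit for more than MAX_CLEAR_COUNT iterations, the
 * mask register is read, that bit is cleared from the read value
 * (mask &= ~reg), and the result is written back, so only the
 * repeating source is silenced while all other enabled sources stay
 * armed.
 */
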
5678/*
5679 * CCE block "misc" interrupt. Source is < 16.
5680 */
5681static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5682{
5683 const struct err_reg_info *eri = &misc_errs[source];
5684
5685 if (eri->handler) {
5686 interrupt_clear_down(dd, 0, eri);
5687 } else {
5688 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5689 source);
5690 }
5691}
5692
5693static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5694{
5695 return flag_string(buf, buf_len, flags,
5696 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5697}
5698
5699/*
5700 * Send context error interrupt. Source (hw_context) is < 160.
5701 *
5702 * All send context errors cause the send context to halt. The normal
5703 * clear-down mechanism cannot be used because we cannot clear the
5704 * error bits until several other long-running items are done first.
5705 * This is OK because with the context halted, nothing else is going
5706 * to happen on it anyway.
5707 */
5708static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5709 unsigned int hw_context)
5710{
5711 struct send_context_info *sci;
5712 struct send_context *sc;
5713 char flags[96];
5714 u64 status;
5715 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005716 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005717
5718 sw_index = dd->hw_to_sw[hw_context];
5719 if (sw_index >= dd->num_send_contexts) {
5720 dd_dev_err(dd,
5721 "out of range sw index %u for send context %u\n",
5722 sw_index, hw_context);
5723 return;
5724 }
5725 sci = &dd->send_contexts[sw_index];
5726 sc = sci->sc;
5727 if (!sc) {
5728 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5729 sw_index, hw_context);
5730 return;
5731 }
5732
5733 /* tell the software that a halt has begun */
5734 sc_stop(sc, SCF_HALTED);
5735
5736 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5737
5738 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5739 send_context_err_status_string(flags, sizeof(flags), status));
5740
5741 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5742 handle_send_egress_err_info(dd);
5743
5744 /*
5745 * Automatically restart halted kernel contexts out of interrupt
5746 * context. User contexts must ask the driver to restart the context.
5747 */
5748 if (sc->type != SC_USER)
5749 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005750
5751 /*
5752 * Update the counters for the corresponding status bits.
5753 * Note that these particular counters are aggregated over all
5754 * 160 contexts.
5755 */
5756 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5757 if (status & (1ull << i))
5758 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5759 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005760}
5761
5762static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5763 unsigned int source, u64 status)
5764{
5765 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005766 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005767
5768 sde = &dd->per_sdma[source];
5769#ifdef CONFIG_SDMA_VERBOSITY
5770 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5771 slashstrip(__FILE__), __LINE__, __func__);
5772 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5773 sde->this_idx, source, (unsigned long long)status);
5774#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005775 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005776 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005777
5778 /*
5779 * Update the counters for the corresponding status bits.
5780 * Note that these particular counters are aggregated over
5781 * all 16 DMA engines.
5782 */
5783 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5784 if (status & (1ull << i))
5785 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5786 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005787}
5788
5789/*
5790 * CCE block SDMA error interrupt. Source is < 16.
5791 */
5792static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5793{
5794#ifdef CONFIG_SDMA_VERBOSITY
5795 struct sdma_engine *sde = &dd->per_sdma[source];
5796
5797 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5798 slashstrip(__FILE__), __LINE__, __func__);
5799 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5800 source);
5801 sdma_dumpstate(sde);
5802#endif
5803 interrupt_clear_down(dd, source, &sdma_eng_err);
5804}
5805
5806/*
5807 * CCE block "various" interrupt. Source is < 8.
5808 */
5809static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5810{
5811 const struct err_reg_info *eri = &various_err[source];
5812
5813 /*
5814 * TCritInt cannot go through interrupt_clear_down()
5815 * because it is not a second tier interrupt. The handler
5816 * should be called directly.
5817 */
5818 if (source == TCRIT_INT_SOURCE)
5819 handle_temp_err(dd);
5820 else if (eri->handler)
5821 interrupt_clear_down(dd, 0, eri);
5822 else
5823 dd_dev_info(dd,
5824 "%s: Unimplemented/reserved interrupt %d\n",
5825 __func__, source);
5826}
5827
5828static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5829{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005830 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005831 struct hfi1_pportdata *ppd = dd->pport;
5832 unsigned long flags;
5833 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5834
5835 if (reg & QSFP_HFI0_MODPRST_N) {
5836
5837 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5838 __func__);
5839
5840 if (!qsfp_mod_present(ppd)) {
5841 ppd->driver_link_ready = 0;
5842 /*
5843 * Cable removed, reset all our information about the
5844 * cache and cable capabilities
5845 */
5846
5847 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5848 /*
5849 * We don't set cache_refresh_required here as we expect
5850 * an interrupt when a cable is inserted
5851 */
5852 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005853 ppd->qsfp_info.reset_needed = 0;
5854 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005855 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5856 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005857 /* Invert the ModPresent pin now to detect plug-in */
5858 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5859 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005860
5861 if ((ppd->offline_disabled_reason >
5862 HFI1_ODR_MASK(
5863 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
5864 (ppd->offline_disabled_reason ==
5865 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5866 ppd->offline_disabled_reason =
5867 HFI1_ODR_MASK(
5868 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);
5869
Mike Marciniszyn77241052015-07-30 15:17:43 -04005870 if (ppd->host_link_state == HLS_DN_POLL) {
5871 /*
5872 * The link is still in POLL. This means
5873 * that the normal link down processing
5874 * will not happen. We have to do it here
5875 * before turning the DC off.
5876 */
5877 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5878 }
5879 } else {
5880 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5881 ppd->qsfp_info.cache_valid = 0;
5882 ppd->qsfp_info.cache_refresh_required = 1;
5883 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5884 flags);
5885
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005886 /*
5887 * Stop inversion of ModPresent pin to detect
5888 * removal of the cable
5889 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005890 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005891 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5892 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5893
5894 ppd->offline_disabled_reason =
5895 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005896 }
5897 }
5898
5899 if (reg & QSFP_HFI0_INT_N) {
5900
5901 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5902 __func__);
5903 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5904 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005905 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5906 }
5907
5908 /* Schedule the QSFP work only if there is a cable attached. */
5909 if (qsfp_mod_present(ppd))
5910 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5911}
5912
5913static int request_host_lcb_access(struct hfi1_devdata *dd)
5914{
5915 int ret;
5916
5917 ret = do_8051_command(dd, HCMD_MISC,
5918 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5919 NULL);
5920 if (ret != HCMD_SUCCESS) {
5921 dd_dev_err(dd, "%s: command failed with error %d\n",
5922 __func__, ret);
5923 }
5924 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5925}
5926
5927static int request_8051_lcb_access(struct hfi1_devdata *dd)
5928{
5929 int ret;
5930
5931 ret = do_8051_command(dd, HCMD_MISC,
5932 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5933 NULL);
5934 if (ret != HCMD_SUCCESS) {
5935 dd_dev_err(dd, "%s: command failed with error %d\n",
5936 __func__, ret);
5937 }
5938 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5939}
5940
5941/*
5942 * Set the LCB selector - allow host access. The DCC selector always
5943 * points to the host.
5944 */
5945static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5946{
5947 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5948 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5949 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5950}
5951
5952/*
5953 * Clear the LCB selector - allow 8051 access. The DCC selector always
5954 * points to the host.
5955 */
5956static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5957{
5958 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5959 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5960}
5961
5962/*
5963 * Acquire LCB access from the 8051. If the host already has access,
5964 * just increment a counter. Otherwise, inform the 8051 that the
5965 * host is taking access.
5966 *
5967 * Returns:
5968 * 0 on success
5969 * -EBUSY if the 8051 has control and cannot be disturbed
5970 * -errno if unable to acquire access from the 8051
5971 */
5972int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5973{
5974 struct hfi1_pportdata *ppd = dd->pport;
5975 int ret = 0;
5976
5977 /*
5978 * Use the host link state lock so the operation of this routine
5979 * { link state check, selector change, count increment } can occur
5980 * as a unit against a link state change. Otherwise there is a
5981 * race between the state change and the count increment.
5982 */
5983 if (sleep_ok) {
5984 mutex_lock(&ppd->hls_lock);
5985 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03005986 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005987 udelay(1);
5988 }
5989
5990 /* this access is valid only when the link is up */
5991 if ((ppd->host_link_state & HLS_UP) == 0) {
5992 dd_dev_info(dd, "%s: link state %s not up\n",
5993 __func__, link_state_name(ppd->host_link_state));
5994 ret = -EBUSY;
5995 goto done;
5996 }
5997
5998 if (dd->lcb_access_count == 0) {
5999 ret = request_host_lcb_access(dd);
6000 if (ret) {
6001 dd_dev_err(dd,
6002 "%s: unable to acquire LCB access, err %d\n",
6003 __func__, ret);
6004 goto done;
6005 }
6006 set_host_lcb_access(dd);
6007 }
6008 dd->lcb_access_count++;
6009done:
6010 mutex_unlock(&ppd->hls_lock);
6011 return ret;
6012}
6013
6014/*
6015 * Release LCB access by decrementing the use count. If the count is moving
6016 * from 1 to 0, inform the 8051 that it has control back.
6017 *
6018 * Returns:
6019 * 0 on success
6020 * -errno if unable to release access to the 8051
6021 */
6022int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6023{
6024 int ret = 0;
6025
6026 /*
6027 * Use the host link state lock because the acquire needed it.
6028 * Here, we only need to keep { selector change, count decrement }
6029 * as a unit.
6030 */
6031 if (sleep_ok) {
6032 mutex_lock(&dd->pport->hls_lock);
6033 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006034 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006035 udelay(1);
6036 }
6037
6038 if (dd->lcb_access_count == 0) {
6039 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6040 __func__);
6041 goto done;
6042 }
6043
6044 if (dd->lcb_access_count == 1) {
6045 set_8051_lcb_access(dd);
6046 ret = request_8051_lcb_access(dd);
6047 if (ret) {
6048 dd_dev_err(dd,
6049 "%s: unable to release LCB access, err %d\n",
6050 __func__, ret);
6051 /* restore host access if the grant didn't work */
6052 set_host_lcb_access(dd);
6053 goto done;
6054 }
6055 }
6056 dd->lcb_access_count--;
6057done:
6058 mutex_unlock(&dd->pport->hls_lock);
6059 return ret;
6060}
6061
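/*
 * Illustrative caller of the pair above (sketch only; the real users
 * of the LCB CSRs live elsewhere in the driver):
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_CFG_RUN);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Pass sleep_ok = 0 from contexts that cannot sleep; the routines then
 * spin on mutex_trylock() instead of blocking in mutex_lock().
 */
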
6062/*
6063 * Initialize LCB access variables and state. Called during driver load,
6064 * after most of the initialization is finished.
6065 *
6066 * The DC default is LCB access on for the host. The driver defaults to
6067 * leaving access to the 8051. Assign access now - this constrains the call
6068 * to this routine to be after all LCB set-up is done. In particular, after
6069 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6070 */
6071static void init_lcb_access(struct hfi1_devdata *dd)
6072{
6073 dd->lcb_access_count = 0;
6074}
6075
6076/*
6077 * Write a response back to an 8051 request.
6078 */
6079static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6080{
6081 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6082 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6083 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6084 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6085}
6086
6087/*
6088 * Handle requests from the 8051.
6089 */
6090static void handle_8051_request(struct hfi1_devdata *dd)
6091{
6092 u64 reg;
6093 u16 data;
6094 u8 type;
6095
6096 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6097 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6098 return; /* no request */
6099
6100 /* zero out COMPLETED so the response is seen */
6101 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6102
6103 /* extract request details */
6104 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6105 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6106 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6107 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6108
6109 switch (type) {
6110 case HREQ_LOAD_CONFIG:
6111 case HREQ_SAVE_CONFIG:
6112 case HREQ_READ_CONFIG:
6113 case HREQ_SET_TX_EQ_ABS:
6114 case HREQ_SET_TX_EQ_REL:
6115 case HREQ_ENABLE:
6116 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6117 type);
6118 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6119 break;
6120
6121 case HREQ_CONFIG_DONE:
6122 hreq_response(dd, HREQ_SUCCESS, 0);
6123 break;
6124
6125 case HREQ_INTERFACE_TEST:
6126 hreq_response(dd, HREQ_SUCCESS, data);
6127 break;
6128
6129 default:
6130 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6131 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6132 break;
6133 }
6134}
6135
6136static void write_global_credit(struct hfi1_devdata *dd,
6137 u8 vau, u16 total, u16 shared)
6138{
6139 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6140 ((u64)total
6141 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6142 | ((u64)shared
6143 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6144 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6145}
6146
6147/*
6148 * Set up initial VL15 credits of the remote. Assumes the rest of
6149 * the CM credit registers are zero from a previous global or credit reset.
6150 */
6151void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6152{
6153 /* leave shared count at zero for both global and VL15 */
6154 write_global_credit(dd, vau, vl15buf, 0);
6155
6156 /* We may need some credits for another VL when sending packets
6157 * with the snoop interface. Dividing it down the middle for VL15
6158 * and VL0 should suffice.
6159 */
6160 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6161 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6162 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6163 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6164 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6165 } else {
6166 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6167 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6168 }
6169}
6170
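/*
 * Credit split example for set_up_vl15() (numbers for illustration
 * only): with vl15buf = 128 credits advertised by the remote and snoop
 * mode active, VL15 and VL0 each get 128 >> 1 = 64 dedicated credits;
 * without snoop mode all 128 go to VL15 and the shared limit stays 0.
 */
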
6171/*
6172 * Zero all credit details from the previous connection and
6173 * reset the CM manager's internal counters.
6174 */
6175void reset_link_credits(struct hfi1_devdata *dd)
6176{
6177 int i;
6178
6179 /* remove all previous VL credit limits */
6180 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6181 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6182 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6183 write_global_credit(dd, 0, 0, 0);
6184 /* reset the CM block */
6185 pio_send_control(dd, PSC_CM_RESET);
6186}
6187
6188/* convert a vCU to a CU */
6189static u32 vcu_to_cu(u8 vcu)
6190{
6191 return 1 << vcu;
6192}
6193
6194/* convert a CU to a vCU */
6195static u8 cu_to_vcu(u32 cu)
6196{
6197 return ilog2(cu);
6198}
6199
6200/* convert a vAU to an AU */
6201static u32 vau_to_au(u8 vau)
6202{
6203 return 8 * (1 << vau);
6204}
6205
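/*
 * Worked examples for the encodings above: vcu_to_cu(3) = 1 << 3 = 8
 * and cu_to_vcu(8) = ilog2(8) = 3; vau_to_au(2) = 8 * (1 << 2) = 32
 * bytes, the factor of 8 making the smallest allocation unit
 * (vau = 0) equal to 8 bytes.
 */
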
6206static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6207{
6208 ppd->sm_trap_qp = 0x0;
6209 ppd->sa_qp = 0x1;
6210}
6211
6212/*
6213 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6214 */
6215static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6216{
6217 u64 reg;
6218
6219 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6220 write_csr(dd, DC_LCB_CFG_RUN, 0);
6221 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6222 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6223 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6224 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6225 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6226 reg = read_csr(dd, DCC_CFG_RESET);
6227 write_csr(dd, DCC_CFG_RESET,
6228 reg
6229 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6230 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6231 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6232 if (!abort) {
6233 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6234 write_csr(dd, DCC_CFG_RESET, reg);
6235 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6236 }
6237}
6238
6239/*
6240 * This routine should be called after the link has been transitioned to
6241 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6242 * reset).
6243 *
6244 * The expectation is that the caller of this routine would have taken
6245 * care of properly transitioning the link into the correct state.
6246 */
6247static void dc_shutdown(struct hfi1_devdata *dd)
6248{
6249 unsigned long flags;
6250
6251 spin_lock_irqsave(&dd->dc8051_lock, flags);
6252 if (dd->dc_shutdown) {
6253 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6254 return;
6255 }
6256 dd->dc_shutdown = 1;
6257 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6258 /* Shutdown the LCB */
6259 lcb_shutdown(dd, 1);
6260	/* Going to OFFLINE would have caused the 8051 to put the
6261	 * SerDes into reset already. Just need to shut down the 8051
6262	 * itself. */
6263 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6264}
6265
6266/* Calling this after the DC has been brought out of reset should not
6267 * do any damage. */
6268static void dc_start(struct hfi1_devdata *dd)
6269{
6270 unsigned long flags;
6271 int ret;
6272
6273 spin_lock_irqsave(&dd->dc8051_lock, flags);
6274 if (!dd->dc_shutdown)
6275 goto done;
6276 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6277 /* Take the 8051 out of reset */
6278 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6279 /* Wait until 8051 is ready */
6280 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6281 if (ret) {
6282 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6283 __func__);
6284 }
6285 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6286 write_csr(dd, DCC_CFG_RESET, 0x10);
6287 /* lcb_shutdown() with abort=1 does not restore these */
6288 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6289 spin_lock_irqsave(&dd->dc8051_lock, flags);
6290 dd->dc_shutdown = 0;
6291done:
6292 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6293}
6294
6295/*
6296 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6297 */
6298static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6299{
6300 u64 rx_radr, tx_radr;
6301 u32 version;
6302
6303 if (dd->icode != ICODE_FPGA_EMULATION)
6304 return;
6305
6306 /*
6307 * These LCB defaults on emulator _s are good, nothing to do here:
6308 * LCB_CFG_TX_FIFOS_RADR
6309 * LCB_CFG_RX_FIFOS_RADR
6310 * LCB_CFG_LN_DCLK
6311 * LCB_CFG_IGNORE_LOST_RCLK
6312 */
6313 if (is_emulator_s(dd))
6314 return;
6315 /* else this is _p */
6316
6317 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006318 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006319 version = 0x2d; /* all B0 use 0x2d or higher settings */
6320
6321 if (version <= 0x12) {
6322 /* release 0x12 and below */
6323
6324 /*
6325 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6326 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6327 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6328 */
6329 rx_radr =
6330 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6331 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6332 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6333 /*
6334 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6335 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6336 */
6337 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6338 } else if (version <= 0x18) {
6339 /* release 0x13 up to 0x18 */
6340 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6341 rx_radr =
6342 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6343 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6344 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6345 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6346 } else if (version == 0x19) {
6347 /* release 0x19 */
6348 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6349 rx_radr =
6350 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6351 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6352 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6353 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6354 } else if (version == 0x1a) {
6355 /* release 0x1a */
6356 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6357 rx_radr =
6358 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6359 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6360 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6361 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6362 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6363 } else {
6364 /* release 0x1b and higher */
6365 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6366 rx_radr =
6367 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6368 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6369 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6370 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6371 }
6372
6373 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6374 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6375 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6376 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6377 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6378}
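
/*
 * Summary of the emulator _p settings applied above, by release
 * (composite RADR values written to the CSRs):
 *   <= 0x12:    RX_FIFOS_RADR 0xa99, TX_FIFOS_RADR RST_VAL 6
 *   0x13-0x18:  RX_FIFOS_RADR 0x988, TX_FIFOS_RADR RST_VAL 7
 *   0x19:       RX_FIFOS_RADR 0xa99, TX_FIFOS_RADR RST_VAL 3
 *   0x1a:       RX_FIFOS_RADR 0x988, TX_FIFOS_RADR RST_VAL 7, LN_DCLK = 1
 *   >= 0x1b:    RX_FIFOS_RADR 0x877, TX_FIFOS_RADR RST_VAL 3
 * All releases also set IGNORE_LOST_RCLK.EN.
 */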
6379
6380/*
6381 * Handle a SMA idle message
6382 *
6383 * This is a work-queue function outside of the interrupt.
6384 */
6385void handle_sma_message(struct work_struct *work)
6386{
6387 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6388 sma_message_work);
6389 struct hfi1_devdata *dd = ppd->dd;
6390 u64 msg;
6391 int ret;
6392
6393	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6394	 * is stripped off */
6395 ret = read_idle_sma(dd, &msg);
6396 if (ret)
6397 return;
6398 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6399 /*
6400 * React to the SMA message. Byte[1] (0 for us) is the command.
6401 */
6402 switch (msg & 0xff) {
6403 case SMA_IDLE_ARM:
6404 /*
6405 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6406 * State Transitions
6407 *
6408 * Only expected in INIT or ARMED, discard otherwise.
6409 */
6410 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6411 ppd->neighbor_normal = 1;
6412 break;
6413 case SMA_IDLE_ACTIVE:
6414 /*
6415 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6416 * State Transitions
6417 *
6418 * Can activate the node. Discard otherwise.
6419 */
6420 if (ppd->host_link_state == HLS_UP_ARMED
6421 && ppd->is_active_optimize_enabled) {
6422 ppd->neighbor_normal = 1;
6423 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6424 if (ret)
6425 dd_dev_err(
6426 dd,
6427 "%s: received Active SMA idle message, couldn't set link to Active\n",
6428 __func__);
6429 }
6430 break;
6431 default:
6432 dd_dev_err(dd,
6433 "%s: received unexpected SMA idle message 0x%llx\n",
6434 __func__, msg);
6435 break;
6436 }
6437}
6438
6439static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6440{
6441 u64 rcvctrl;
6442 unsigned long flags;
6443
6444 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6445 rcvctrl = read_csr(dd, RCV_CTRL);
6446 rcvctrl |= add;
6447 rcvctrl &= ~clear;
6448 write_csr(dd, RCV_CTRL, rcvctrl);
6449 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6450}
6451
6452static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6453{
6454 adjust_rcvctrl(dd, add, 0);
6455}
6456
6457static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6458{
6459 adjust_rcvctrl(dd, 0, clear);
6460}
6461
6462/*
6463 * Called from all interrupt handlers to start handling an SPC freeze.
6464 */
6465void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6466{
6467 struct hfi1_devdata *dd = ppd->dd;
6468 struct send_context *sc;
6469 int i;
6470
6471 if (flags & FREEZE_SELF)
6472 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6473
6474 /* enter frozen mode */
6475 dd->flags |= HFI1_FROZEN;
6476
6477 /* notify all SDMA engines that they are going into a freeze */
6478 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6479
6480 /* do halt pre-handling on all enabled send contexts */
6481 for (i = 0; i < dd->num_send_contexts; i++) {
6482 sc = dd->send_contexts[i].sc;
6483 if (sc && (sc->flags & SCF_ENABLED))
6484 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6485 }
6486
6487	/* Send contexts are frozen. Notify user space */
6488 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6489
6490 if (flags & FREEZE_ABORT) {
6491 dd_dev_err(dd,
6492 "Aborted freeze recovery. Please REBOOT system\n");
6493 return;
6494 }
6495 /* queue non-interrupt handler */
6496 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6497}
6498
6499/*
6500 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6501 * depending on the "freeze" parameter.
6502 *
6503 * No need to return an error if it times out, our only option
6504 * is to proceed anyway.
6505 */
6506static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6507{
6508 unsigned long timeout;
6509 u64 reg;
6510
6511 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6512 while (1) {
6513 reg = read_csr(dd, CCE_STATUS);
6514 if (freeze) {
6515 /* waiting until all indicators are set */
6516 if ((reg & ALL_FROZE) == ALL_FROZE)
6517 return; /* all done */
6518 } else {
6519 /* waiting until all indicators are clear */
6520 if ((reg & ALL_FROZE) == 0)
6521 return; /* all done */
6522 }
6523
6524 if (time_after(jiffies, timeout)) {
6525 dd_dev_err(dd,
6526 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6527 freeze ? "" : "un",
6528 reg & ALL_FROZE,
6529 freeze ? ALL_FROZE : 0ull);
6530 return;
6531 }
6532 usleep_range(80, 120);
6533 }
6534}
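
/*
 * Usage note: wait_for_freeze_status(dd, 1) polls until
 * (CCE_STATUS & ALL_FROZE) == ALL_FROZE, while wait_for_freeze_status(dd, 0)
 * polls until those bits read back as 0.  Polling is done in 80-120us
 * steps and gives up (logging an error only) after FREEZE_STATUS_TIMEOUT
 * milliseconds.
 */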
6535
6536/*
6537 * Do all freeze handling for the RXE block.
6538 */
6539static void rxe_freeze(struct hfi1_devdata *dd)
6540{
6541 int i;
6542
6543 /* disable port */
6544 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6545
6546 /* disable all receive contexts */
6547 for (i = 0; i < dd->num_rcv_contexts; i++)
6548 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6549}
6550
6551/*
6552 * Unfreeze handling for the RXE block - kernel contexts only.
6553 * This will also enable the port. User contexts will do unfreeze
6554 * handling on a per-context basis as they call into the driver.
6555 *
6556 */
6557static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6558{
6559 int i;
6560
6561 /* enable all kernel contexts */
6562 for (i = 0; i < dd->n_krcv_queues; i++)
6563 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6564
6565 /* enable port */
6566 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6567}
6568
6569/*
6570 * Non-interrupt SPC freeze handling.
6571 *
6572 * This is a work-queue function outside of the triggering interrupt.
6573 */
6574void handle_freeze(struct work_struct *work)
6575{
6576 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6577 freeze_work);
6578 struct hfi1_devdata *dd = ppd->dd;
6579
6580 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006581 wait_for_freeze_status(dd, 1);
6582
6583 /* SPC is now frozen */
6584
6585 /* do send PIO freeze steps */
6586 pio_freeze(dd);
6587
6588 /* do send DMA freeze steps */
6589 sdma_freeze(dd);
6590
6591 /* do send egress freeze steps - nothing to do */
6592
6593 /* do receive freeze steps */
6594 rxe_freeze(dd);
6595
6596 /*
6597 * Unfreeze the hardware - clear the freeze, wait for each
6598 * block's frozen bit to clear, then clear the frozen flag.
6599 */
6600 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6601 wait_for_freeze_status(dd, 0);
6602
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006603 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006604 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6605 wait_for_freeze_status(dd, 1);
6606 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6607 wait_for_freeze_status(dd, 0);
6608 }
6609
6610 /* do send PIO unfreeze steps for kernel contexts */
6611 pio_kernel_unfreeze(dd);
6612
6613 /* do send DMA unfreeze steps */
6614 sdma_unfreeze(dd);
6615
6616 /* do send egress unfreeze steps - nothing to do */
6617
6618 /* do receive unfreeze steps for kernel contexts */
6619 rxe_kernel_unfreeze(dd);
6620
6621 /*
6622 * The unfreeze procedure touches global device registers when
6623 * it disables and re-enables RXE. Mark the device unfrozen
6624 * after all that is done so other parts of the driver waiting
6625 * for the device to unfreeze don't do things out of order.
6626 *
6627 * The above implies that the meaning of HFI1_FROZEN flag is
6628 * "Device has gone into freeze mode and freeze mode handling
6629 * is still in progress."
6630 *
6631 * The flag will be removed when freeze mode processing has
6632 * completed.
6633 */
6634 dd->flags &= ~HFI1_FROZEN;
6635 wake_up(&dd->event_queue);
6636
6637 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006638}
6639
6640/*
6641 * Handle a link up interrupt from the 8051.
6642 *
6643 * This is a work-queue function outside of the interrupt.
6644 */
6645void handle_link_up(struct work_struct *work)
6646{
6647 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6648 link_up_work);
6649 set_link_state(ppd, HLS_UP_INIT);
6650
6651 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6652 read_ltp_rtt(ppd->dd);
6653 /*
6654 * OPA specifies that certain counters are cleared on a transition
6655 * to link up, so do that.
6656 */
6657 clear_linkup_counters(ppd->dd);
6658 /*
6659 * And (re)set link up default values.
6660 */
6661 set_linkup_defaults(ppd);
6662
6663 /* enforce link speed enabled */
6664 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6665 /* oops - current speed is not enabled, bounce */
6666 dd_dev_err(ppd->dd,
6667 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6668 ppd->link_speed_active, ppd->link_speed_enabled);
6669 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6670 OPA_LINKDOWN_REASON_SPEED_POLICY);
6671 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006672 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006673 start_link(ppd);
6674 }
6675}
6676
6677/* Several pieces of LNI information were cached for SMA in ppd.
6678 * Reset these on link down */
6679static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6680{
6681 ppd->neighbor_guid = 0;
6682 ppd->neighbor_port_number = 0;
6683 ppd->neighbor_type = 0;
6684 ppd->neighbor_fm_security = 0;
6685}
6686
6687/*
6688 * Handle a link down interrupt from the 8051.
6689 *
6690 * This is a work-queue function outside of the interrupt.
6691 */
6692void handle_link_down(struct work_struct *work)
6693{
6694 u8 lcl_reason, neigh_reason = 0;
6695 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6696 link_down_work);
6697
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006698 if ((ppd->host_link_state &
6699 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6700 ppd->port_type == PORT_TYPE_FIXED)
6701 ppd->offline_disabled_reason =
6702 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6703
6704 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006705 set_link_state(ppd, HLS_DN_OFFLINE);
6706
6707 lcl_reason = 0;
6708 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6709
6710 /*
6711 * If no reason, assume peer-initiated but missed
6712 * LinkGoingDown idle flits.
6713 */
6714 if (neigh_reason == 0)
6715 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6716
6717 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6718
6719 reset_neighbor_info(ppd);
6720
6721 /* disable the port */
6722 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6723
6724 /* If there is no cable attached, turn the DC off. Otherwise,
6725	 * start bringing the link up. */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006726 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006727 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006728 } else {
6729 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006730 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006731 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006732}
6733
6734void handle_link_bounce(struct work_struct *work)
6735{
6736 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6737 link_bounce_work);
6738
6739 /*
6740 * Only do something if the link is currently up.
6741 */
6742 if (ppd->host_link_state & HLS_UP) {
6743 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006744 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006745 start_link(ppd);
6746 } else {
6747 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6748 __func__, link_state_name(ppd->host_link_state));
6749 }
6750}
6751
6752/*
6753 * Mask conversion: Capability exchange to Port LTP. The capability
6754 * exchange has an implicit 16b CRC that is mandatory.
6755 */
6756static int cap_to_port_ltp(int cap)
6757{
6758 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6759
6760 if (cap & CAP_CRC_14B)
6761 port_ltp |= PORT_LTP_CRC_MODE_14;
6762 if (cap & CAP_CRC_48B)
6763 port_ltp |= PORT_LTP_CRC_MODE_48;
6764 if (cap & CAP_CRC_12B_16B_PER_LANE)
6765 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6766
6767 return port_ltp;
6768}
6769
6770/*
6771 * Convert an OPA Port LTP mask to capability mask
6772 */
6773int port_ltp_to_cap(int port_ltp)
6774{
6775 int cap_mask = 0;
6776
6777 if (port_ltp & PORT_LTP_CRC_MODE_14)
6778 cap_mask |= CAP_CRC_14B;
6779 if (port_ltp & PORT_LTP_CRC_MODE_48)
6780 cap_mask |= CAP_CRC_48B;
6781 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6782 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6783
6784 return cap_mask;
6785}
6786
6787/*
6788 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6789 */
6790static int lcb_to_port_ltp(int lcb_crc)
6791{
6792 int port_ltp = 0;
6793
6794 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6795 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6796 else if (lcb_crc == LCB_CRC_48B)
6797 port_ltp = PORT_LTP_CRC_MODE_48;
6798 else if (lcb_crc == LCB_CRC_14B)
6799 port_ltp = PORT_LTP_CRC_MODE_14;
6800 else
6801 port_ltp = PORT_LTP_CRC_MODE_16;
6802
6803 return port_ltp;
6804}
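
/*
 * Illustration of the conversions above (derived from the code): a
 * capability mask of CAP_CRC_14B maps to
 * cap_to_port_ltp(CAP_CRC_14B) == PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14,
 * since the 16b mode is always included as mandatory, while the reverse
 * port_ltp_to_cap() intentionally drops that mandatory 16b bit.  The LCB
 * value describes a single mode, so lcb_to_port_ltp() always returns
 * exactly one PORT_LTP_CRC_MODE_* bit.
 */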
6805
6806/*
6807 * Our neighbor has indicated that we are allowed to act as a fabric
6808 * manager, so place the full management partition key in the second
6809 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6810 * that we should already have the limited management partition key in
6811 * array element 1, and also that the port is not yet up when
6812 * add_full_mgmt_pkey() is invoked.
6813 */
6814static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6815{
6816 struct hfi1_devdata *dd = ppd->dd;
6817
Dean Luick87645222015-12-01 15:38:21 -05006818	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6819 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6820 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6821 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006822 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6823 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6824}
6825
6826/*
6827 * Convert the given link width to the OPA link width bitmask.
6828 */
6829static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6830{
6831 switch (width) {
6832 case 0:
6833 /*
6834 * Simulator and quick linkup do not set the width.
6835 * Just set it to 4x without complaint.
6836 */
6837 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6838 return OPA_LINK_WIDTH_4X;
6839 return 0; /* no lanes up */
6840 case 1: return OPA_LINK_WIDTH_1X;
6841 case 2: return OPA_LINK_WIDTH_2X;
6842 case 3: return OPA_LINK_WIDTH_3X;
6843 default:
6844 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6845 __func__, width);
6846 /* fall through */
6847 case 4: return OPA_LINK_WIDTH_4X;
6848 }
6849}
6850
6851/*
6852 * Do a population count on the bottom nibble.
6853 */
6854static const u8 bit_counts[16] = {
6855 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6856};
6857static inline u8 nibble_to_count(u8 nibble)
6858{
6859 return bit_counts[nibble & 0xf];
6860}
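
/*
 * Example: an enable_lane mask of 0xb (binary 1011, lanes 0, 1 and 3
 * active) gives nibble_to_count(0xb) == bit_counts[0xb] == 3.
 */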
6861
6862/*
6863 * Read the active lane information from the 8051 registers and return
6864 * their widths.
6865 *
6866 * Active lane information is found in these 8051 registers:
6867 * enable_lane_tx
6868 * enable_lane_rx
6869 */
6870static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6871 u16 *rx_width)
6872{
6873 u16 tx, rx;
6874 u8 enable_lane_rx;
6875 u8 enable_lane_tx;
6876 u8 tx_polarity_inversion;
6877 u8 rx_polarity_inversion;
6878 u8 max_rate;
6879
6880 /* read the active lanes */
6881 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6882 &rx_polarity_inversion, &max_rate);
6883 read_local_lni(dd, &enable_lane_rx);
6884
6885 /* convert to counts */
6886 tx = nibble_to_count(enable_lane_tx);
6887 rx = nibble_to_count(enable_lane_rx);
6888
6889 /*
6890 * Set link_speed_active here, overriding what was set in
6891 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6892 * set the max_rate field in handle_verify_cap until v0.19.
6893 */
6894 if ((dd->icode == ICODE_RTL_SILICON)
6895 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6896 /* max_rate: 0 = 12.5G, 1 = 25G */
6897 switch (max_rate) {
6898 case 0:
6899 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6900 break;
6901 default:
6902 dd_dev_err(dd,
6903 "%s: unexpected max rate %d, using 25Gb\n",
6904 __func__, (int)max_rate);
6905 /* fall through */
6906 case 1:
6907 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6908 break;
6909 }
6910 }
6911
6912 dd_dev_info(dd,
6913 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6914 enable_lane_tx, tx, enable_lane_rx, rx);
6915 *tx_width = link_width_to_bits(dd, tx);
6916 *rx_width = link_width_to_bits(dd, rx);
6917}
6918
6919/*
6920 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6921 * Valid after the end of VerifyCap and during LinkUp. Does not change
6922 * after link up. I.e. look elsewhere for downgrade information.
6923 *
6924 * Bits are:
6925 * + bits [7:4] contain the number of active transmitters
6926 * + bits [3:0] contain the number of active receivers
6927 * These are numbers 1 through 4 and can be different values if the
6928 * link is asymmetric.
6929 *
6930 * verify_cap_local_fm_link_width[0] retains its original value.
6931 */
6932static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6933 u16 *rx_width)
6934{
6935 u16 widths, tx, rx;
6936 u8 misc_bits, local_flags;
6937 u16 active_tx, active_rx;
6938
6939 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6940 tx = widths >> 12;
6941 rx = (widths >> 8) & 0xf;
6942
6943 *tx_width = link_width_to_bits(dd, tx);
6944 *rx_width = link_width_to_bits(dd, rx);
6945
6946 /* print the active widths */
6947 get_link_widths(dd, &active_tx, &active_rx);
6948}
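
/*
 * Example of the decode above: a widths value of 0x4400 yields tx == 4
 * and rx == 4, i.e. OPA_LINK_WIDTH_4X in both directions; an asymmetric
 * 0x4200 would yield 4X transmit and 2X receive.
 */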
6949
6950/*
6951 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6952 * hardware information when the link first comes up.
6953 *
6954 * The link width is not available until after VerifyCap.AllFramesReceived
6955 * (the trigger for handle_verify_cap), so this is outside that routine
6956 * and should be called when the 8051 signals linkup.
6957 */
6958void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6959{
6960 u16 tx_width, rx_width;
6961
6962 /* get end-of-LNI link widths */
6963 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
6964
6965 /* use tx_width as the link is supposed to be symmetric on link up */
6966 ppd->link_width_active = tx_width;
6967 /* link width downgrade active (LWD.A) starts out matching LW.A */
6968 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
6969 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
6970 /* per OPA spec, on link up LWD.E resets to LWD.S */
6971 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
6972	/* cache the active egress rate (units of 10^6 bits/sec) */
6973 ppd->current_egress_rate = active_egress_rate(ppd);
6974}
6975
6976/*
6977 * Handle a verify capabilities interrupt from the 8051.
6978 *
6979 * This is a work-queue function outside of the interrupt.
6980 */
6981void handle_verify_cap(struct work_struct *work)
6982{
6983 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6984 link_vc_work);
6985 struct hfi1_devdata *dd = ppd->dd;
6986 u64 reg;
6987 u8 power_management;
6988	u8 continuous;
6989 u8 vcu;
6990 u8 vau;
6991 u8 z;
6992 u16 vl15buf;
6993 u16 link_widths;
6994 u16 crc_mask;
6995 u16 crc_val;
6996 u16 device_id;
6997 u16 active_tx, active_rx;
6998 u8 partner_supported_crc;
6999 u8 remote_tx_rate;
7000 u8 device_rev;
7001
7002 set_link_state(ppd, HLS_VERIFY_CAP);
7003
7004 lcb_shutdown(dd, 0);
7005 adjust_lcb_for_fpga_serdes(dd);
7006
7007 /*
7008 * These are now valid:
7009 * remote VerifyCap fields in the general LNI config
7010 * CSR DC8051_STS_REMOTE_GUID
7011 * CSR DC8051_STS_REMOTE_NODE_TYPE
7012 * CSR DC8051_STS_REMOTE_FM_SECURITY
7013 * CSR DC8051_STS_REMOTE_PORT_NO
7014 */
7015
7016	read_vc_remote_phy(dd, &power_management, &continuous);
7017 read_vc_remote_fabric(
7018 dd,
7019 &vau,
7020 &z,
7021 &vcu,
7022 &vl15buf,
7023 &partner_supported_crc);
7024 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7025 read_remote_device_id(dd, &device_id, &device_rev);
7026 /*
7027 * And the 'MgmtAllowed' information, which is exchanged during
7028	 * LNI, is also available at this point.
7029 */
7030 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7031 /* print the active widths */
7032 get_link_widths(dd, &active_tx, &active_rx);
7033 dd_dev_info(dd,
7034 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7035		(int)power_management, (int)continuous);
7036 dd_dev_info(dd,
7037 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7038 (int)vau,
7039 (int)z,
7040 (int)vcu,
7041 (int)vl15buf,
7042 (int)partner_supported_crc);
7043 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7044 (u32)remote_tx_rate, (u32)link_widths);
7045 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7046 (u32)device_id, (u32)device_rev);
7047 /*
7048 * The peer vAU value just read is the peer receiver value. HFI does
7049 * not support a transmit vAU of 0 (AU == 8). We advertised that
7050 * with Z=1 in the fabric capabilities sent to the peer. The peer
7051 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7052 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7053 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7054 * subject to the Z value exception.
7055 */
7056 if (vau == 0)
7057 vau = 1;
7058 set_up_vl15(dd, vau, vl15buf);
7059
7060 /* set up the LCB CRC mode */
7061 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7062
7063 /* order is important: use the lowest bit in common */
7064 if (crc_mask & CAP_CRC_14B)
7065 crc_val = LCB_CRC_14B;
7066 else if (crc_mask & CAP_CRC_48B)
7067 crc_val = LCB_CRC_48B;
7068 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7069 crc_val = LCB_CRC_12B_16B_PER_LANE;
7070 else
7071 crc_val = LCB_CRC_16B;
7072
7073 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7074 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7075 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7076
7077 /* set (14b only) or clear sideband credit */
7078 reg = read_csr(dd, SEND_CM_CTRL);
7079 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7080 write_csr(dd, SEND_CM_CTRL,
7081 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7082 } else {
7083 write_csr(dd, SEND_CM_CTRL,
7084 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7085 }
7086
7087 ppd->link_speed_active = 0; /* invalid value */
7088 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7089 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7090 switch (remote_tx_rate) {
7091 case 0:
7092 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7093 break;
7094 case 1:
7095 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7096 break;
7097 }
7098 } else {
7099 /* actual rate is highest bit of the ANDed rates */
7100 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7101
7102 if (rate & 2)
7103 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7104 else if (rate & 1)
7105 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7106 }
7107 if (ppd->link_speed_active == 0) {
7108 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7109 __func__, (int)remote_tx_rate);
7110 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7111 }
7112
7113 /*
7114 * Cache the values of the supported, enabled, and active
7115 * LTP CRC modes to return in 'portinfo' queries. But the bit
7116 * flags that are returned in the portinfo query differ from
7117 * what's in the link_crc_mask, crc_sizes, and crc_val
7118 * variables. Convert these here.
7119 */
7120 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7121 /* supported crc modes */
7122 ppd->port_ltp_crc_mode |=
7123 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7124 /* enabled crc modes */
7125 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7126 /* active crc mode */
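	/*
	 * The resulting port_ltp_crc_mode layout is therefore:
	 *   bits [11:8] supported modes, bits [7:4] enabled modes,
	 *   bits [3:0]  the single active mode.
	 */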
7127
7128 /* set up the remote credit return table */
7129 assign_remote_cm_au_table(dd, vcu);
7130
7131 /*
7132 * The LCB is reset on entry to handle_verify_cap(), so this must
7133 * be applied on every link up.
7134 *
7135 * Adjust LCB error kill enable to kill the link if
7136 * these RBUF errors are seen:
7137 * REPLAY_BUF_MBE_SMASK
7138 * FLIT_INPUT_BUF_MBE_SMASK
7139 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007140 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007141 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7142 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7143 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7144 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7145 }
7146
7147 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7148 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7149
7150 /* give 8051 access to the LCB CSRs */
7151 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7152 set_8051_lcb_access(dd);
7153
7154 ppd->neighbor_guid =
7155 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7156 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7157 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7158 ppd->neighbor_type =
7159 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7160 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7161 ppd->neighbor_fm_security =
7162 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7163 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7164 dd_dev_info(dd,
7165 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7166 ppd->neighbor_guid, ppd->neighbor_type,
7167 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7168 if (ppd->mgmt_allowed)
7169 add_full_mgmt_pkey(ppd);
7170
7171 /* tell the 8051 to go to LinkUp */
7172 set_link_state(ppd, HLS_GOING_UP);
7173}
7174
7175/*
7176 * Apply the link width downgrade enabled policy against the current active
7177 * link widths.
7178 *
7179 * Called when the enabled policy changes or the active link widths change.
7180 */
7181void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7182{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007183 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007184 int tries;
7185 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007186 u16 tx, rx;
7187
Dean Luick323fd782015-11-16 21:59:24 -05007188 /* use the hls lock to avoid a race with actual link up */
7189 tries = 0;
7190retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007191 mutex_lock(&ppd->hls_lock);
7192 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007193 if (!(ppd->host_link_state & HLS_UP)) {
7194		/* still going up - wait and retry */
7195 if (ppd->host_link_state & HLS_GOING_UP) {
7196 if (++tries < 1000) {
7197 mutex_unlock(&ppd->hls_lock);
7198 usleep_range(100, 120); /* arbitrary */
7199 goto retry;
7200 }
7201 dd_dev_err(ppd->dd,
7202 "%s: giving up waiting for link state change\n",
7203 __func__);
7204 }
7205 goto done;
7206 }
7207
7208 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007209
7210 if (refresh_widths) {
7211 get_link_widths(ppd->dd, &tx, &rx);
7212 ppd->link_width_downgrade_tx_active = tx;
7213 ppd->link_width_downgrade_rx_active = rx;
7214 }
7215
7216 if (lwde == 0) {
7217 /* downgrade is disabled */
7218
7219 /* bounce if not at starting active width */
7220 if ((ppd->link_width_active !=
7221 ppd->link_width_downgrade_tx_active)
7222 || (ppd->link_width_active !=
7223 ppd->link_width_downgrade_rx_active)) {
7224 dd_dev_err(ppd->dd,
7225 "Link downgrade is disabled and link has downgraded, downing link\n");
7226 dd_dev_err(ppd->dd,
7227 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7228 ppd->link_width_active,
7229 ppd->link_width_downgrade_tx_active,
7230 ppd->link_width_downgrade_rx_active);
7231 do_bounce = 1;
7232 }
7233 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7234 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7235 /* Tx or Rx is outside the enabled policy */
7236 dd_dev_err(ppd->dd,
7237 "Link is outside of downgrade allowed, downing link\n");
7238 dd_dev_err(ppd->dd,
7239 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7240 lwde,
7241 ppd->link_width_downgrade_tx_active,
7242 ppd->link_width_downgrade_rx_active);
7243 do_bounce = 1;
7244 }
7245
Dean Luick323fd782015-11-16 21:59:24 -05007246done:
7247 mutex_unlock(&ppd->hls_lock);
7248
Mike Marciniszyn77241052015-07-30 15:17:43 -04007249 if (do_bounce) {
7250 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7251 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7252 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007253 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007254 start_link(ppd);
7255 }
7256}
7257
7258/*
7259 * Handle a link downgrade interrupt from the 8051.
7260 *
7261 * This is a work-queue function outside of the interrupt.
7262 */
7263void handle_link_downgrade(struct work_struct *work)
7264{
7265 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7266 link_downgrade_work);
7267
7268 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7269 apply_link_downgrade_policy(ppd, 1);
7270}
7271
7272static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7273{
7274 return flag_string(buf, buf_len, flags, dcc_err_flags,
7275 ARRAY_SIZE(dcc_err_flags));
7276}
7277
7278static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7279{
7280 return flag_string(buf, buf_len, flags, lcb_err_flags,
7281 ARRAY_SIZE(lcb_err_flags));
7282}
7283
7284static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7285{
7286 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7287 ARRAY_SIZE(dc8051_err_flags));
7288}
7289
7290static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7291{
7292 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7293 ARRAY_SIZE(dc8051_info_err_flags));
7294}
7295
7296static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7297{
7298 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7299 ARRAY_SIZE(dc8051_info_host_msg_flags));
7300}
7301
7302static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7303{
7304 struct hfi1_pportdata *ppd = dd->pport;
7305 u64 info, err, host_msg;
7306 int queue_link_down = 0;
7307 char buf[96];
7308
7309 /* look at the flags */
7310 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7311 /* 8051 information set by firmware */
7312 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7313 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7314 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7315 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7316 host_msg = (info >>
7317 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7318 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7319
7320 /*
7321 * Handle error flags.
7322 */
7323 if (err & FAILED_LNI) {
7324 /*
7325 * LNI error indications are cleared by the 8051
7326 * only when starting polling. Only pay attention
7327 * to them when in the states that occur during
7328 * LNI.
7329 */
7330 if (ppd->host_link_state
7331 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7332 queue_link_down = 1;
7333 dd_dev_info(dd, "Link error: %s\n",
7334 dc8051_info_err_string(buf,
7335 sizeof(buf),
7336 err & FAILED_LNI));
7337 }
7338 err &= ~(u64)FAILED_LNI;
7339 }
Dean Luick6d014532015-12-01 15:38:23 -05007340		/* unknown frames can happen during LNI, just count */
7341 if (err & UNKNOWN_FRAME) {
7342 ppd->unknown_frame_count++;
7343 err &= ~(u64)UNKNOWN_FRAME;
7344 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007345 if (err) {
7346 /* report remaining errors, but do not do anything */
7347 dd_dev_err(dd, "8051 info error: %s\n",
7348 dc8051_info_err_string(buf, sizeof(buf), err));
7349 }
7350
7351 /*
7352 * Handle host message flags.
7353 */
7354 if (host_msg & HOST_REQ_DONE) {
7355 /*
7356 * Presently, the driver does a busy wait for
7357 * host requests to complete. This is only an
7358 * informational message.
7359 * NOTE: The 8051 clears the host message
7360 * information *on the next 8051 command*.
7361 * Therefore, when linkup is achieved,
7362 * this flag will still be set.
7363 */
7364 host_msg &= ~(u64)HOST_REQ_DONE;
7365 }
7366 if (host_msg & BC_SMA_MSG) {
7367 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7368 host_msg &= ~(u64)BC_SMA_MSG;
7369 }
7370 if (host_msg & LINKUP_ACHIEVED) {
7371 dd_dev_info(dd, "8051: Link up\n");
7372 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7373 host_msg &= ~(u64)LINKUP_ACHIEVED;
7374 }
7375 if (host_msg & EXT_DEVICE_CFG_REQ) {
7376 handle_8051_request(dd);
7377 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7378 }
7379 if (host_msg & VERIFY_CAP_FRAME) {
7380 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7381 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7382 }
7383 if (host_msg & LINK_GOING_DOWN) {
7384 const char *extra = "";
7385 /* no downgrade action needed if going down */
7386 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7387 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7388 extra = " (ignoring downgrade)";
7389 }
7390 dd_dev_info(dd, "8051: Link down%s\n", extra);
7391 queue_link_down = 1;
7392 host_msg &= ~(u64)LINK_GOING_DOWN;
7393 }
7394 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7395 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7396 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7397 }
7398 if (host_msg) {
7399 /* report remaining messages, but do not do anything */
7400 dd_dev_info(dd, "8051 info host message: %s\n",
7401 dc8051_info_host_msg_string(buf, sizeof(buf),
7402 host_msg));
7403 }
7404
7405 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7406 }
7407 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7408 /*
7409 * Lost the 8051 heartbeat. If this happens, we
7410 * receive constant interrupts about it. Disable
7411 * the interrupt after the first.
7412 */
7413 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7414 write_csr(dd, DC_DC8051_ERR_EN,
7415 read_csr(dd, DC_DC8051_ERR_EN)
7416 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7417
7418 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7419 }
7420 if (reg) {
7421 /* report the error, but do not do anything */
7422 dd_dev_err(dd, "8051 error: %s\n",
7423 dc8051_err_string(buf, sizeof(buf), reg));
7424 }
7425
7426 if (queue_link_down) {
7427 /* if the link is already going down or disabled, do not
7428 * queue another */
7429 if ((ppd->host_link_state
7430 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7431 || ppd->link_enabled == 0) {
7432 dd_dev_info(dd, "%s: not queuing link down\n",
7433 __func__);
7434 } else {
7435 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7436 }
7437 }
7438}
7439
7440static const char * const fm_config_txt[] = {
7441[0] =
7442 "BadHeadDist: Distance violation between two head flits",
7443[1] =
7444 "BadTailDist: Distance violation between two tail flits",
7445[2] =
7446 "BadCtrlDist: Distance violation between two credit control flits",
7447[3] =
7448 "BadCrdAck: Credits return for unsupported VL",
7449[4] =
7450 "UnsupportedVLMarker: Received VL Marker",
7451[5] =
7452 "BadPreempt: Exceeded the preemption nesting level",
7453[6] =
7454 "BadControlFlit: Received unsupported control flit",
7455/* no 7 */
7456[8] =
7457 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7458};
7459
7460static const char * const port_rcv_txt[] = {
7461[1] =
7462 "BadPktLen: Illegal PktLen",
7463[2] =
7464 "PktLenTooLong: Packet longer than PktLen",
7465[3] =
7466 "PktLenTooShort: Packet shorter than PktLen",
7467[4] =
7468 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7469[5] =
7470 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7471[6] =
7472 "BadL2: Illegal L2 opcode",
7473[7] =
7474 "BadSC: Unsupported SC",
7475[9] =
7476 "BadRC: Illegal RC",
7477[11] =
7478 "PreemptError: Preempting with same VL",
7479[12] =
7480 "PreemptVL15: Preempting a VL15 packet",
7481};
7482
7483#define OPA_LDR_FMCONFIG_OFFSET 16
7484#define OPA_LDR_PORTRCV_OFFSET 0
7485static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7486{
7487 u64 info, hdr0, hdr1;
7488 const char *extra;
7489 char buf[96];
7490 struct hfi1_pportdata *ppd = dd->pport;
7491 u8 lcl_reason = 0;
7492 int do_bounce = 0;
7493
7494 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7495 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7496 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7497 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7498 /* set status bit */
7499 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7500 }
7501 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7502 }
7503
7504 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7505 struct hfi1_pportdata *ppd = dd->pport;
7506 /* this counter saturates at (2^32) - 1 */
7507 if (ppd->link_downed < (u32)UINT_MAX)
7508 ppd->link_downed++;
7509 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7510 }
7511
7512 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7513 u8 reason_valid = 1;
7514
7515 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7516 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7517 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7518 /* set status bit */
7519 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7520 }
7521 switch (info) {
7522 case 0:
7523 case 1:
7524 case 2:
7525 case 3:
7526 case 4:
7527 case 5:
7528 case 6:
7529 extra = fm_config_txt[info];
7530 break;
7531 case 8:
7532 extra = fm_config_txt[info];
7533 if (ppd->port_error_action &
7534 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7535 do_bounce = 1;
7536 /*
7537 * lcl_reason cannot be derived from info
7538 * for this error
7539 */
7540 lcl_reason =
7541 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7542 }
7543 break;
7544 default:
7545 reason_valid = 0;
7546 snprintf(buf, sizeof(buf), "reserved%lld", info);
7547 extra = buf;
7548 break;
7549 }
7550
7551 if (reason_valid && !do_bounce) {
7552 do_bounce = ppd->port_error_action &
7553 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7554 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7555 }
7556
7557 /* just report this */
7558 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7559 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7560 }
7561
7562 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7563 u8 reason_valid = 1;
7564
7565 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7566 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7567 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7568 if (!(dd->err_info_rcvport.status_and_code &
7569 OPA_EI_STATUS_SMASK)) {
7570 dd->err_info_rcvport.status_and_code =
7571 info & OPA_EI_CODE_SMASK;
7572 /* set status bit */
7573 dd->err_info_rcvport.status_and_code |=
7574 OPA_EI_STATUS_SMASK;
7575 /* save first 2 flits in the packet that caused
7576 * the error */
7577 dd->err_info_rcvport.packet_flit1 = hdr0;
7578 dd->err_info_rcvport.packet_flit2 = hdr1;
7579 }
7580 switch (info) {
7581 case 1:
7582 case 2:
7583 case 3:
7584 case 4:
7585 case 5:
7586 case 6:
7587 case 7:
7588 case 9:
7589 case 11:
7590 case 12:
7591 extra = port_rcv_txt[info];
7592 break;
7593 default:
7594 reason_valid = 0;
7595 snprintf(buf, sizeof(buf), "reserved%lld", info);
7596 extra = buf;
7597 break;
7598 }
7599
7600 if (reason_valid && !do_bounce) {
7601 do_bounce = ppd->port_error_action &
7602 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7603 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7604 }
7605
7606 /* just report this */
7607 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7608 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7609 hdr0, hdr1);
7610
7611 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7612 }
7613
7614 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7615 /* informative only */
7616 dd_dev_info(dd, "8051 access to LCB blocked\n");
7617 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7618 }
7619 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7620 /* informative only */
7621 dd_dev_info(dd, "host access to LCB blocked\n");
7622 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7623 }
7624
7625 /* report any remaining errors */
7626 if (reg)
7627 dd_dev_info(dd, "DCC Error: %s\n",
7628 dcc_err_string(buf, sizeof(buf), reg));
7629
7630 if (lcl_reason == 0)
7631 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7632
7633 if (do_bounce) {
7634 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7635 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7636 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7637 }
7638}
7639
7640static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7641{
7642 char buf[96];
7643
7644 dd_dev_info(dd, "LCB Error: %s\n",
7645 lcb_err_string(buf, sizeof(buf), reg));
7646}
7647
7648/*
7649 * CCE block DC interrupt. Source is < 8.
7650 */
7651static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7652{
7653 const struct err_reg_info *eri = &dc_errs[source];
7654
7655 if (eri->handler) {
7656 interrupt_clear_down(dd, 0, eri);
7657 } else if (source == 3 /* dc_lbm_int */) {
7658 /*
7659 * This indicates that a parity error has occurred on the
7660 * address/control lines presented to the LBM. The error
7661 * is a single pulse, there is no associated error flag,
7662 * and it is non-maskable. This is because if a parity
7663 * error occurs on the request the request is dropped.
7664 * This should never occur, but it is nice to know if it
7665 * ever does.
7666 */
7667 dd_dev_err(dd, "Parity error in DC LBM block\n");
7668 } else {
7669 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7670 }
7671}
7672
7673/*
7674 * TX block send credit interrupt. Source is < 160.
7675 */
7676static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7677{
7678 sc_group_release_update(dd, source);
7679}
7680
7681/*
7682 * TX block SDMA interrupt. Source is < 48.
7683 *
7684 * SDMA interrupts are grouped by type:
7685 *
7686 * 0 - N-1 = SDma
7687 * N - 2N-1 = SDmaProgress
7688 * 2N - 3N-1 = SDmaIdle
7689 */
7690static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7691{
7692 /* what interrupt */
7693 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7694 /* which engine */
7695 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7696
7697#ifdef CONFIG_SDMA_VERBOSITY
7698 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7699 slashstrip(__FILE__), __LINE__, __func__);
7700 sdma_dumpstate(&dd->per_sdma[which]);
7701#endif
7702
7703 if (likely(what < 3 && which < dd->num_sdma)) {
7704 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7705 } else {
7706 /* should not happen */
7707 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7708 }
7709}
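
/*
 * Example of the decode above, assuming TXE_NUM_SDMA_ENGINES == 16:
 * source 21 gives what == 21 / 16 == 1 (SDmaProgress) and
 * which == 21 % 16 == 5, so the progress interrupt is delivered to
 * SDMA engine 5.
 */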
7710
7711/*
7712 * RX block receive available interrupt. Source is < 160.
7713 */
7714static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7715{
7716 struct hfi1_ctxtdata *rcd;
7717 char *err_detail;
7718
7719 if (likely(source < dd->num_rcv_contexts)) {
7720 rcd = dd->rcd[source];
7721 if (rcd) {
7722 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007723 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007724 else
7725 handle_user_interrupt(rcd);
7726 return; /* OK */
7727 }
7728 /* received an interrupt, but no rcd */
7729 err_detail = "dataless";
7730 } else {
7731 /* received an interrupt, but are not using that context */
7732 err_detail = "out of range";
7733 }
7734 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7735 err_detail, source);
7736}
7737
7738/*
7739 * RX block receive urgent interrupt. Source is < 160.
7740 */
7741static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7742{
7743 struct hfi1_ctxtdata *rcd;
7744 char *err_detail;
7745
7746 if (likely(source < dd->num_rcv_contexts)) {
7747 rcd = dd->rcd[source];
7748 if (rcd) {
7749 /* only pay attention to user urgent interrupts */
7750 if (source >= dd->first_user_ctxt)
7751 handle_user_interrupt(rcd);
7752 return; /* OK */
7753 }
7754 /* received an interrupt, but no rcd */
7755 err_detail = "dataless";
7756 } else {
7757 /* received an interrupt, but are not using that context */
7758 err_detail = "out of range";
7759 }
7760 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7761 err_detail, source);
7762}
7763
7764/*
7765 * Reserved range interrupt. Should not be called in normal operation.
7766 */
7767static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7768{
7769 char name[64];
7770
7771 dd_dev_err(dd, "unexpected %s interrupt\n",
7772 is_reserved_name(name, sizeof(name), source));
7773}
7774
7775static const struct is_table is_table[] = {
7776/* start end
7777 name func interrupt func */
7778{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7779 is_misc_err_name, is_misc_err_int },
7780{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7781 is_sdma_eng_err_name, is_sdma_eng_err_int },
7782{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7783 is_sendctxt_err_name, is_sendctxt_err_int },
7784{ IS_SDMA_START, IS_SDMA_END,
7785 is_sdma_eng_name, is_sdma_eng_int },
7786{ IS_VARIOUS_START, IS_VARIOUS_END,
7787 is_various_name, is_various_int },
7788{ IS_DC_START, IS_DC_END,
7789 is_dc_name, is_dc_int },
7790{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7791 is_rcv_avail_name, is_rcv_avail_int },
7792{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7793 is_rcv_urgent_name, is_rcv_urgent_int },
7794{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7795 is_send_credit_name, is_send_credit_int},
7796{ IS_RESERVED_START, IS_RESERVED_END,
7797 is_reserved_name, is_reserved_int},
7798};
7799
7800/*
7801 * Interrupt source interrupt - called when the given source has an interrupt.
7802 * Source is a bit index into an array of 64-bit integers.
7803 */
7804static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7805{
7806 const struct is_table *entry;
7807
7808 /* avoids a double compare by walking the table in-order */
7809 for (entry = &is_table[0]; entry->is_name; entry++) {
7810 if (source < entry->end) {
7811 trace_hfi1_interrupt(dd, entry, source);
7812 entry->is_int(dd, source - entry->start);
7813 return;
7814 }
7815 }
7816 /* fell off the end */
7817 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7818}
7819
7820/*
7821 * General interrupt handler. This is able to correctly handle
7822 * all interrupts in case INTx is used.
7823 */
7824static irqreturn_t general_interrupt(int irq, void *data)
7825{
7826 struct hfi1_devdata *dd = data;
7827 u64 regs[CCE_NUM_INT_CSRS];
7828 u32 bit;
7829 int i;
7830
7831 this_cpu_inc(*dd->int_counter);
7832
7833 /* phase 1: scan and clear all handled interrupts */
7834 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7835 if (dd->gi_mask[i] == 0) {
7836 regs[i] = 0; /* used later */
7837 continue;
7838 }
7839 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7840 dd->gi_mask[i];
7841 /* only clear if anything is set */
7842 if (regs[i])
7843 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7844 }
7845
7846 /* phase 2: call the appropriate handler */
7847 for_each_set_bit(bit, (unsigned long *)&regs[0],
7848 CCE_NUM_INT_CSRS*64) {
7849 is_interrupt(dd, bit);
7850 }
7851
7852 return IRQ_HANDLED;
7853}
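
/*
 * Note on the two phases above: phase 1 snapshots and clears every
 * asserted interrupt bit that is not masked out by gi_mask[]; phase 2
 * then dispatches each bit of that snapshot through is_interrupt().
 * Sources masked out of gi_mask[] are simply ignored here (they are
 * presumably routed to dedicated MSI-X handlers instead).
 */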
7854
7855static irqreturn_t sdma_interrupt(int irq, void *data)
7856{
7857 struct sdma_engine *sde = data;
7858 struct hfi1_devdata *dd = sde->dd;
7859 u64 status;
7860
7861#ifdef CONFIG_SDMA_VERBOSITY
7862 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7863 slashstrip(__FILE__), __LINE__, __func__);
7864 sdma_dumpstate(sde);
7865#endif
7866
7867 this_cpu_inc(*dd->int_counter);
7868
7869 /* This read_csr is really bad in the hot path */
7870 status = read_csr(dd,
7871 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7872 & sde->imask;
7873 if (likely(status)) {
7874 /* clear the interrupt(s) */
7875 write_csr(dd,
7876 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7877 status);
7878
7879 /* handle the interrupt(s) */
7880 sdma_engine_interrupt(sde, status);
7881 } else
7882 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7883 sde->this_idx);
7884
7885 return IRQ_HANDLED;
7886}
7887
7888/*
Dean Luickf4f30031c2015-10-26 10:28:44 -04007889 * Clear the receive interrupt, forcing the write and making sure
7890 * we have data from the chip, pushing everything in front of it
7891 * back to the host.
7892 */
7893static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7894{
7895 struct hfi1_devdata *dd = rcd->dd;
7896 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7897
7898 mmiowb(); /* make sure everything before is written */
7899 write_csr(dd, addr, rcd->imask);
7900 /* force the above write on the chip and get a value back */
7901 (void)read_csr(dd, addr);
7902}
7903
7904/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05007905void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007906{
7907 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7908}
7909
7910/* return non-zero if a packet is present */
7911static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7912{
7913 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7914 return (rcd->seq_cnt ==
7915 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7916
7917 /* else is RDMA rtail */
7918 return (rcd->head != get_rcvhdrtail(rcd));
7919}
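
/*
 * In other words: without DMA_RTAIL the driver decides whether a packet
 * is present by checking that the RHF at the current head still carries
 * the expected sequence number; with DMA_RTAIL it simply compares the
 * software head against the DMA'd hardware tail.
 */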
7920
7921/*
7922 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7923 * This routine will try to handle packets immediately (latency), but if
7924	 * it finds too many, it will invoke the thread handler (bandwidth).  The
7925	 * chip receive interrupt is *not* cleared down until this or the thread (if
7926 * invoked) is finished. The intent is to avoid extra interrupts while we
7927 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04007928 */
7929static irqreturn_t receive_context_interrupt(int irq, void *data)
7930{
7931 struct hfi1_ctxtdata *rcd = data;
7932 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04007933 int disposition;
7934 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007935
7936 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7937 this_cpu_inc(*dd->int_counter);
7938
Dean Luickf4f30031c2015-10-26 10:28:44 -04007939 /* receive interrupt remains blocked while processing packets */
7940 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007941
Dean Luickf4f30031c2015-10-26 10:28:44 -04007942 /*
7943 * Too many packets were seen while processing packets in this
7944 * IRQ handler. Invoke the handler thread. The receive interrupt
7945 * remains blocked.
7946 */
7947 if (disposition == RCV_PKT_LIMIT)
7948 return IRQ_WAKE_THREAD;
7949
7950 /*
7951 * The packet processor detected no more packets. Clear the receive
7952	 * interrupt and recheck for a packet that may have arrived
7953 * after the previous check and interrupt clear. If a packet arrived,
7954 * force another interrupt.
7955 */
7956 clear_recv_intr(rcd);
7957 present = check_packet_present(rcd);
7958 if (present)
7959 force_recv_intr(rcd);
7960
7961 return IRQ_HANDLED;
7962}
7963
7964/*
7965 * Receive packet thread handler. This expects to be invoked with the
7966 * receive interrupt still blocked.
7967 */
7968static irqreturn_t receive_context_thread(int irq, void *data)
7969{
7970 struct hfi1_ctxtdata *rcd = data;
7971 int present;
7972
7973 /* receive interrupt is still blocked from the IRQ handler */
7974 (void)rcd->do_interrupt(rcd, 1);
7975
7976 /*
7977 * The packet processor will only return if it detected no more
7978 * packets. Hold IRQs here so we can safely clear the interrupt and
7979 * recheck for a packet that may have arrived after the previous
7980 * check and the interrupt clear. If a packet arrived, force another
7981 * interrupt.
7982 */
7983 local_irq_disable();
7984 clear_recv_intr(rcd);
7985 present = check_packet_present(rcd);
7986 if (present)
7987 force_recv_intr(rcd);
7988 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04007989
7990 return IRQ_HANDLED;
7991}
7992
7993/* ========================================================================= */
7994
7995u32 read_physical_state(struct hfi1_devdata *dd)
7996{
7997 u64 reg;
7998
7999 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8000 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8001 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8002}
8003
Jim Snowfb9036d2016-01-11 18:32:21 -05008004u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008005{
8006 u64 reg;
8007
8008 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8009 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8010 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8011}
8012
8013static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8014{
8015 u64 reg;
8016
8017 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8018 /* clear current state, set new state */
8019 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8020 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8021 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8022}
8023
8024/*
8025 * Use the 8051 to read an LCB CSR.
8026 */
8027static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8028{
8029 u32 regno;
8030 int ret;
8031
8032 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8033 if (acquire_lcb_access(dd, 0) == 0) {
8034 *data = read_csr(dd, addr);
8035 release_lcb_access(dd, 0);
8036 return 0;
8037 }
8038 return -EBUSY;
8039 }
8040
8041 /* register is an index of LCB registers: (offset - base) / 8 */
8042 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8043 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8044 if (ret != HCMD_SUCCESS)
8045 return -EBUSY;
8046 return 0;
8047}
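/*
 * Worked example (illustrative; the offset is made up): an LCB CSR
 * sitting 0x28 bytes past DC_LCB_CFG_RUN is requested from the 8051
 * as register index 0x28 >> 3 = 5, because each LCB CSR occupies
 * 8 bytes.
 */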
8048
8049/*
8050 * Read an LCB CSR. Access may not be in host control, so check.
8051 * Return 0 on success, -EBUSY on failure.
8052 */
8053int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8054{
8055 struct hfi1_pportdata *ppd = dd->pport;
8056
8057 /* if up, go through the 8051 for the value */
8058 if (ppd->host_link_state & HLS_UP)
8059 return read_lcb_via_8051(dd, addr, data);
8060 /* if going up or down, no access */
8061 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8062 return -EBUSY;
8063 /* otherwise, host has access */
8064 *data = read_csr(dd, addr);
8065 return 0;
8066}
8067
8068/*
8069 * Use the 8051 to write a LCB CSR.
8070 */
8071static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8072{
Dean Luick3bf40d62015-11-06 20:07:04 -05008073 u32 regno;
8074 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008075
Dean Luick3bf40d62015-11-06 20:07:04 -05008076 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8077 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8078 if (acquire_lcb_access(dd, 0) == 0) {
8079 write_csr(dd, addr, data);
8080 release_lcb_access(dd, 0);
8081 return 0;
8082 }
8083 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008084 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008085
8086 /* register is an index of LCB registers: (offset - base) / 8 */
8087 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8088 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8089 if (ret != HCMD_SUCCESS)
8090 return -EBUSY;
8091 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008092}
8093
8094/*
8095 * Write an LCB CSR. Access may not be in host control, so check.
8096 * Return 0 on success, -EBUSY on failure.
8097 */
8098int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8099{
8100 struct hfi1_pportdata *ppd = dd->pport;
8101
8102 /* if up, go through the 8051 for the value */
8103 if (ppd->host_link_state & HLS_UP)
8104 return write_lcb_via_8051(dd, addr, data);
8105 /* if going up or down, no access */
8106 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8107 return -EBUSY;
8108 /* otherwise, host has access */
8109 write_csr(dd, addr, data);
8110 return 0;
8111}
8112
8113/*
8114 * Returns:
8115 * < 0 = Linux error, not able to get access
8116 * > 0 = 8051 command RETURN_CODE
8117 */
8118static int do_8051_command(
8119 struct hfi1_devdata *dd,
8120 u32 type,
8121 u64 in_data,
8122 u64 *out_data)
8123{
8124 u64 reg, completed;
8125 int return_code;
8126 unsigned long flags;
8127 unsigned long timeout;
8128
8129 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8130
8131 /*
8132 * Alternative to holding the lock for a long time:
8133 * - keep busy wait - have other users bounce off
8134 */
8135 spin_lock_irqsave(&dd->dc8051_lock, flags);
8136
8137 /* We can't send any commands to the 8051 if it's in reset */
8138 if (dd->dc_shutdown) {
8139 return_code = -ENODEV;
8140 goto fail;
8141 }
8142
8143 /*
8144 * If an 8051 host command timed out previously, then the 8051 is
8145 * stuck.
8146 *
8147 * On first timeout, attempt to reset and restart the entire DC
8148 * block (including 8051). (Is this too big of a hammer?)
8149 *
8150 * If the 8051 times out a second time, the reset did not bring it
8151 * back to healthy life. In that case, fail any subsequent commands.
8152 */
8153 if (dd->dc8051_timed_out) {
8154 if (dd->dc8051_timed_out > 1) {
8155 dd_dev_err(dd,
8156 "Previous 8051 host command timed out, skipping command %u\n",
8157 type);
8158 return_code = -ENXIO;
8159 goto fail;
8160 }
8161 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8162 dc_shutdown(dd);
8163 dc_start(dd);
8164 spin_lock_irqsave(&dd->dc8051_lock, flags);
8165 }
8166
8167 /*
8168 * If there is no timeout, then the 8051 command interface is
8169 * waiting for a command.
8170 */
8171
8172 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008173	 * When writing an LCB CSR, out_data contains the full value
8174	 * to be written, while in_data contains the relative LCB
8175	 * address in 7:0. Do the work here, rather than in the caller,
8176	 * of distributing the write data to where it needs to go:
8177 *
8178 * Write data
8179 * 39:00 -> in_data[47:8]
8180 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8181 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8182 */
8183 if (type == HCMD_WRITE_LCB_CSR) {
8184 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8185 reg = ((((*out_data) >> 40) & 0xff) <<
8186 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8187 | ((((*out_data) >> 48) & 0xffff) <<
8188 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8189 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8190 }
8191
8192 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008193 * Do two writes: the first to stabilize the type and req_data, the
8194 * second to activate.
8195 */
8196 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8197 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8198 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8199 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8200 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8201 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8202 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8203
8204 /* wait for completion, alternate: interrupt */
8205 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8206 while (1) {
8207 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8208 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8209 if (completed)
8210 break;
8211 if (time_after(jiffies, timeout)) {
8212 dd->dc8051_timed_out++;
8213 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8214 if (out_data)
8215 *out_data = 0;
8216 return_code = -ETIMEDOUT;
8217 goto fail;
8218 }
8219 udelay(2);
8220 }
8221
8222 if (out_data) {
8223 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8224 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8225 if (type == HCMD_READ_LCB_CSR) {
8226 /* top 16 bits are in a different register */
8227 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8228 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8229 << (48
8230 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8231 }
8232 }
8233 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8234 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8235 dd->dc8051_timed_out = 0;
8236 /*
8237 * Clear command for next user.
8238 */
8239 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8240
8241fail:
8242 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8243
8244 return return_code;
8245}
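/*
 * Illustrative sketch of the HCMD_WRITE_LCB_CSR data split described
 * in the comment above (the 64-bit write value is made up):
 *
 *	value to write = 0x0123456789abcdef
 *	bits 39:0   -> in_data      : in_data |= 0x6789abcdefull << 8
 *	bits 47:40  -> RETURN_CODE  : 0x45
 *	bits 63:48  -> RSP_DATA     : 0x0123
 */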
8246
8247static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8248{
8249 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8250}
8251
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008252int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8253 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008254{
8255 u64 data;
8256 int ret;
8257
8258 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8259 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8260 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8261 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8262 if (ret != HCMD_SUCCESS) {
8263 dd_dev_err(dd,
8264 "load 8051 config: field id %d, lane %d, err %d\n",
8265 (int)field_id, (int)lane_id, ret);
8266 }
8267 return ret;
8268}
8269
8270/*
8271 * Read the 8051 firmware "registers". Use the RAM directly. Always
8272 * set the result, even on error.
8273 * Return 0 on success, -errno on failure
8274 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008275int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8276 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008277{
8278 u64 big_data;
8279 u32 addr;
8280 int ret;
8281
8282 /* address start depends on the lane_id */
8283 if (lane_id < 4)
8284 addr = (4 * NUM_GENERAL_FIELDS)
8285 + (lane_id * 4 * NUM_LANE_FIELDS);
8286 else
8287 addr = 0;
8288 addr += field_id * 4;
8289
8290 /* read is in 8-byte chunks, hardware will truncate the address down */
8291 ret = read_8051_data(dd, addr, 8, &big_data);
8292
8293 if (ret == 0) {
8294 /* extract the 4 bytes we want */
8295 if (addr & 0x4)
8296 *result = (u32)(big_data >> 32);
8297 else
8298 *result = (u32)big_data;
8299 } else {
8300 *result = 0;
8301 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8302 __func__, lane_id, field_id);
8303 }
8304
8305 return ret;
8306}
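/*
 * Worked example (illustrative; the field counts are assumed values,
 * not the real NUM_GENERAL_FIELDS/NUM_LANE_FIELDS): with 16 general
 * fields and 8 lane fields, lane 2 / field 3 gives
 *
 *	addr = 4 * 16 + 2 * 4 * 8 + 3 * 4 = 140 = 0x8c
 *
 * The 8-byte read is issued at 0x88, and because addr & 0x4 is set
 * the upper 32 bits of that chunk are returned.
 */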
8307
8308static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8309 u8 continuous)
8310{
8311 u32 frame;
8312
8313 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8314 | power_management << POWER_MANAGEMENT_SHIFT;
8315 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8316 GENERAL_CONFIG, frame);
8317}
8318
8319static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8320 u16 vl15buf, u8 crc_sizes)
8321{
8322 u32 frame;
8323
8324 frame = (u32)vau << VAU_SHIFT
8325 | (u32)z << Z_SHIFT
8326 | (u32)vcu << VCU_SHIFT
8327 | (u32)vl15buf << VL15BUF_SHIFT
8328 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8329 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8330 GENERAL_CONFIG, frame);
8331}
8332
8333static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8334 u8 *flag_bits, u16 *link_widths)
8335{
8336 u32 frame;
8337
8338 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8339 &frame);
8340 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8341 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8342 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8343}
8344
8345static int write_vc_local_link_width(struct hfi1_devdata *dd,
8346 u8 misc_bits,
8347 u8 flag_bits,
8348 u16 link_widths)
8349{
8350 u32 frame;
8351
8352 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8353 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8354 | (u32)link_widths << LINK_WIDTH_SHIFT;
8355 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8356 frame);
8357}
8358
8359static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8360 u8 device_rev)
8361{
8362 u32 frame;
8363
8364 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8365 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8366 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8367}
8368
8369static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8370 u8 *device_rev)
8371{
8372 u32 frame;
8373
8374 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8375 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8376 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8377 & REMOTE_DEVICE_REV_MASK;
8378}
8379
8380void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8381{
8382 u32 frame;
8383
8384 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8385 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8386 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8387}
8388
8389static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8390 u8 *continuous)
8391{
8392 u32 frame;
8393
8394 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8395 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8396 & POWER_MANAGEMENT_MASK;
8397 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8398 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8399}
8400
8401static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8402 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8403{
8404 u32 frame;
8405
8406 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8407 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8408 *z = (frame >> Z_SHIFT) & Z_MASK;
8409 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8410 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8411 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8412}
8413
8414static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8415 u8 *remote_tx_rate,
8416 u16 *link_widths)
8417{
8418 u32 frame;
8419
8420 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8421 &frame);
8422 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8423 & REMOTE_TX_RATE_MASK;
8424 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8425}
8426
8427static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8428{
8429 u32 frame;
8430
8431 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8432 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8433}
8434
8435static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8436{
8437 u32 frame;
8438
8439 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8440 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8441}
8442
8443static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8444{
8445 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8446}
8447
8448static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8449{
8450 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8451}
8452
8453void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8454{
8455 u32 frame;
8456 int ret;
8457
8458 *link_quality = 0;
8459 if (dd->pport->host_link_state & HLS_UP) {
8460 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8461 &frame);
8462 if (ret == 0)
8463 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8464 & LINK_QUALITY_MASK;
8465 }
8466}
8467
8468static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8469{
8470 u32 frame;
8471
8472 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8473 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8474}
8475
8476static int read_tx_settings(struct hfi1_devdata *dd,
8477 u8 *enable_lane_tx,
8478 u8 *tx_polarity_inversion,
8479 u8 *rx_polarity_inversion,
8480 u8 *max_rate)
8481{
8482 u32 frame;
8483 int ret;
8484
8485 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8486 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8487 & ENABLE_LANE_TX_MASK;
8488 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8489 & TX_POLARITY_INVERSION_MASK;
8490 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8491 & RX_POLARITY_INVERSION_MASK;
8492 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8493 return ret;
8494}
8495
8496static int write_tx_settings(struct hfi1_devdata *dd,
8497 u8 enable_lane_tx,
8498 u8 tx_polarity_inversion,
8499 u8 rx_polarity_inversion,
8500 u8 max_rate)
8501{
8502 u32 frame;
8503
8504 /* no need to mask, all variable sizes match field widths */
8505 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8506 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8507 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8508 | max_rate << MAX_RATE_SHIFT;
8509 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8510}
8511
8512static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8513{
8514 u32 frame, version, prod_id;
8515 int ret, lane;
8516
8517 /* 4 lanes */
8518 for (lane = 0; lane < 4; lane++) {
8519 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8520 if (ret) {
8521 dd_dev_err(
8522 dd,
8523 "Unable to read lane %d firmware details\n",
8524 lane);
8525 continue;
8526 }
8527 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8528 & SPICO_ROM_VERSION_MASK;
8529 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8530 & SPICO_ROM_PROD_ID_MASK;
8531 dd_dev_info(dd,
8532 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8533 lane, version, prod_id);
8534 }
8535}
8536
8537/*
8538 * Read an idle LCB message.
8539 *
8540 * Returns 0 on success, -EINVAL on error
8541 */
8542static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8543{
8544 int ret;
8545
8546 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8547 type, data_out);
8548 if (ret != HCMD_SUCCESS) {
8549 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8550 (u32)type, ret);
8551 return -EINVAL;
8552 }
8553 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8554 /* return only the payload as we already know the type */
8555 *data_out >>= IDLE_PAYLOAD_SHIFT;
8556 return 0;
8557}
8558
8559/*
8560 * Read an idle SMA message. To be done in response to a notification from
8561 * the 8051.
8562 *
8563 * Returns 0 on success, -EINVAL on error
8564 */
8565static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8566{
8567 return read_idle_message(dd,
8568 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8569}
8570
8571/*
8572 * Send an idle LCB message.
8573 *
8574 * Returns 0 on success, -EINVAL on error
8575 */
8576static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8577{
8578 int ret;
8579
8580 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8581 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8582 if (ret != HCMD_SUCCESS) {
8583 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8584 data, ret);
8585 return -EINVAL;
8586 }
8587 return 0;
8588}
8589
8590/*
8591 * Send an idle SMA message.
8592 *
8593 * Returns 0 on success, -EINVAL on error
8594 */
8595int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8596{
8597 u64 data;
8598
8599 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8600 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8601 return send_idle_message(dd, data);
8602}
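/*
 * Illustrative sketch (the payload value is made up; the shift names
 * are the ones used above): an SMA payload of 0x2 goes out as
 *
 *	data = (0x2 << IDLE_PAYLOAD_SHIFT)
 *	       | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
 *
 * and read_idle_sma() recovers the payload by shifting it back down
 * by IDLE_PAYLOAD_SHIFT.
 */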
8603
8604/*
8605 * Initialize the LCB then do a quick link up. This may or may not be
8606 * in loopback.
8607 *
8608 * return 0 on success, -errno on error
8609 */
8610static int do_quick_linkup(struct hfi1_devdata *dd)
8611{
8612 u64 reg;
8613 unsigned long timeout;
8614 int ret;
8615
8616 lcb_shutdown(dd, 0);
8617
8618 if (loopback) {
8619 /* LCB_CFG_LOOPBACK.VAL = 2 */
8620 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8621 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8622 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8623 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8624 }
8625
8626 /* start the LCBs */
8627 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8628 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8629
8630 /* simulator only loopback steps */
8631 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8632 /* LCB_CFG_RUN.EN = 1 */
8633 write_csr(dd, DC_LCB_CFG_RUN,
8634 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8635
8636 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8637 timeout = jiffies + msecs_to_jiffies(10);
8638 while (1) {
8639 reg = read_csr(dd,
8640 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8641 if (reg)
8642 break;
8643 if (time_after(jiffies, timeout)) {
8644 dd_dev_err(dd,
8645 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8646 return -ETIMEDOUT;
8647 }
8648 udelay(2);
8649 }
8650
8651 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8652 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8653 }
8654
8655 if (!loopback) {
8656 /*
8657 * When doing quick linkup and not in loopback, both
8658 * sides must be done with LCB set-up before either
8659 * starts the quick linkup. Put a delay here so that
8660 * both sides can be started and have a chance to be
8661 * done with LCB set up before resuming.
8662 */
8663 dd_dev_err(dd,
8664 "Pausing for peer to be finished with LCB set up\n");
8665 msleep(5000);
8666 dd_dev_err(dd,
8667 "Continuing with quick linkup\n");
8668 }
8669
8670 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8671 set_8051_lcb_access(dd);
8672
8673 /*
8674 * State "quick" LinkUp request sets the physical link state to
8675 * LinkUp without a verify capability sequence.
8676 * This state is in simulator v37 and later.
8677 */
8678 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8679 if (ret != HCMD_SUCCESS) {
8680 dd_dev_err(dd,
8681 "%s: set physical link state to quick LinkUp failed with return %d\n",
8682 __func__, ret);
8683
8684 set_host_lcb_access(dd);
8685 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8686
8687 if (ret >= 0)
8688 ret = -EINVAL;
8689 return ret;
8690 }
8691
8692 return 0; /* success */
8693}
8694
8695/*
8696 * Set the SerDes to internal loopback mode.
8697 * Returns 0 on success, -errno on error.
8698 */
8699static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8700{
8701 int ret;
8702
8703 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8704 if (ret == HCMD_SUCCESS)
8705 return 0;
8706 dd_dev_err(dd,
8707 "Set physical link state to SerDes Loopback failed with return %d\n",
8708 ret);
8709 if (ret >= 0)
8710 ret = -EINVAL;
8711 return ret;
8712}
8713
8714/*
8715 * Do all special steps to set up loopback.
8716 */
8717static int init_loopback(struct hfi1_devdata *dd)
8718{
8719 dd_dev_info(dd, "Entering loopback mode\n");
8720
8721 /* all loopbacks should disable self GUID check */
8722 write_csr(dd, DC_DC8051_CFG_MODE,
8723 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8724
8725 /*
8726 * The simulator has only one loopback option - LCB. Switch
8727 * to that option, which includes quick link up.
8728 *
8729 * Accept all valid loopback values.
8730 */
8731 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8732 && (loopback == LOOPBACK_SERDES
8733 || loopback == LOOPBACK_LCB
8734 || loopback == LOOPBACK_CABLE)) {
8735 loopback = LOOPBACK_LCB;
8736 quick_linkup = 1;
8737 return 0;
8738 }
8739
8740 /* handle serdes loopback */
8741 if (loopback == LOOPBACK_SERDES) {
8742		/* internal serdes loopback needs quick linkup on RTL */
8743 if (dd->icode == ICODE_RTL_SILICON)
8744 quick_linkup = 1;
8745 return set_serdes_loopback_mode(dd);
8746 }
8747
8748 /* LCB loopback - handled at poll time */
8749 if (loopback == LOOPBACK_LCB) {
8750 quick_linkup = 1; /* LCB is always quick linkup */
8751
8752 /* not supported in emulation due to emulation RTL changes */
8753 if (dd->icode == ICODE_FPGA_EMULATION) {
8754 dd_dev_err(dd,
8755 "LCB loopback not supported in emulation\n");
8756 return -EINVAL;
8757 }
8758 return 0;
8759 }
8760
8761 /* external cable loopback requires no extra steps */
8762 if (loopback == LOOPBACK_CABLE)
8763 return 0;
8764
8765 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8766 return -EINVAL;
8767}
8768
8769/*
8770 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8771 * used in the Verify Capability link width attribute.
8772 */
8773static u16 opa_to_vc_link_widths(u16 opa_widths)
8774{
8775 int i;
8776 u16 result = 0;
8777
8778 static const struct link_bits {
8779 u16 from;
8780 u16 to;
8781 } opa_link_xlate[] = {
8782 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8783 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8784 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8785 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8786 };
8787
8788 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8789 if (opa_widths & opa_link_xlate[i].from)
8790 result |= opa_link_xlate[i].to;
8791 }
8792 return result;
8793}
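/*
 * Worked example (illustrative): an FM width mask of
 * OPA_LINK_WIDTH_2X | OPA_LINK_WIDTH_4X translates to
 * (1 << 1) | (1 << 3) = 0xa in the Verify Capability encoding.
 */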
8794
8795/*
8796 * Set link attributes before moving to polling.
8797 */
8798static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8799{
8800 struct hfi1_devdata *dd = ppd->dd;
8801 u8 enable_lane_tx;
8802 u8 tx_polarity_inversion;
8803 u8 rx_polarity_inversion;
8804 int ret;
8805
8806 /* reset our fabric serdes to clear any lingering problems */
8807 fabric_serdes_reset(dd);
8808
8809 /* set the local tx rate - need to read-modify-write */
8810 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8811 &rx_polarity_inversion, &ppd->local_tx_rate);
8812 if (ret)
8813 goto set_local_link_attributes_fail;
8814
8815 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8816 /* set the tx rate to the fastest enabled */
8817 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8818 ppd->local_tx_rate = 1;
8819 else
8820 ppd->local_tx_rate = 0;
8821 } else {
8822 /* set the tx rate to all enabled */
8823 ppd->local_tx_rate = 0;
8824 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8825 ppd->local_tx_rate |= 2;
8826 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8827 ppd->local_tx_rate |= 1;
8828 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008829
8830 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008831 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8832 rx_polarity_inversion, ppd->local_tx_rate);
8833 if (ret != HCMD_SUCCESS)
8834 goto set_local_link_attributes_fail;
8835
8836 /*
8837 * DC supports continuous updates.
8838 */
8839 ret = write_vc_local_phy(dd, 0 /* no power management */,
8840 1 /* continuous updates */);
8841 if (ret != HCMD_SUCCESS)
8842 goto set_local_link_attributes_fail;
8843
8844 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8845 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8846 ppd->port_crc_mode_enabled);
8847 if (ret != HCMD_SUCCESS)
8848 goto set_local_link_attributes_fail;
8849
8850 ret = write_vc_local_link_width(dd, 0, 0,
8851 opa_to_vc_link_widths(ppd->link_width_enabled));
8852 if (ret != HCMD_SUCCESS)
8853 goto set_local_link_attributes_fail;
8854
8855 /* let peer know who we are */
8856 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8857 if (ret == HCMD_SUCCESS)
8858 return 0;
8859
8860set_local_link_attributes_fail:
8861 dd_dev_err(dd,
8862 "Failed to set local link attributes, return 0x%x\n",
8863 ret);
8864 return ret;
8865}
8866
8867/*
8868 * Call this to start the link. Schedule a retry if the cable is not
8869 * present or if unable to start polling. Do not do anything if the
8870 * link is disabled. Returns 0 if link is disabled or moved to polling
8871 */
8872int start_link(struct hfi1_pportdata *ppd)
8873{
8874 if (!ppd->link_enabled) {
8875 dd_dev_info(ppd->dd,
8876 "%s: stopping link start because link is disabled\n",
8877 __func__);
8878 return 0;
8879 }
8880 if (!ppd->driver_link_ready) {
8881 dd_dev_info(ppd->dd,
8882 "%s: stopping link start because driver is not ready\n",
8883 __func__);
8884 return 0;
8885 }
8886
8887 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8888 loopback == LOOPBACK_LCB ||
8889 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8890 return set_link_state(ppd, HLS_DN_POLL);
8891
8892 dd_dev_info(ppd->dd,
8893 "%s: stopping link start because no cable is present\n",
8894 __func__);
8895 return -EAGAIN;
8896}
8897
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008898static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
8899{
8900 struct hfi1_devdata *dd = ppd->dd;
8901 u64 mask;
8902 unsigned long timeout;
8903
8904 /*
8905 * Check for QSFP interrupt for t_init (SFF 8679)
8906 */
8907 timeout = jiffies + msecs_to_jiffies(2000);
8908 while (1) {
8909 mask = read_csr(dd, dd->hfi1_id ?
8910 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
8911 if (!(mask & QSFP_HFI0_INT_N)) {
8912 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
8913 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
8914 break;
8915 }
8916 if (time_after(jiffies, timeout)) {
8917 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
8918 __func__);
8919 break;
8920 }
8921 udelay(2);
8922 }
8923}
8924
8925static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
8926{
8927 struct hfi1_devdata *dd = ppd->dd;
8928 u64 mask;
8929
8930 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
8931 if (enable)
8932 mask |= (u64)QSFP_HFI0_INT_N;
8933 else
8934 mask &= ~(u64)QSFP_HFI0_INT_N;
8935 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
8936}
8937
8938void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008939{
8940 struct hfi1_devdata *dd = ppd->dd;
8941 u64 mask, qsfp_mask;
8942
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008943 /* Disable INT_N from triggering QSFP interrupts */
8944 set_qsfp_int_n(ppd, 0);
8945
8946 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008947 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008948 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008949 qsfp_mask |= mask;
8950 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008951 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008952
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008953 qsfp_mask = read_csr(dd, dd->hfi1_id ?
8954 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008955 qsfp_mask &= ~mask;
8956 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008957 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008958
8959 udelay(10);
8960
8961 qsfp_mask |= mask;
8962 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008963 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
8964
8965 wait_for_qsfp_init(ppd);
8966
8967 /*
8968 * Allow INT_N to trigger the QSFP interrupt to watch
8969 * for alarms and warnings
8970 */
8971 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008972}
8973
8974static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
8975 u8 *qsfp_interrupt_status)
8976{
8977 struct hfi1_devdata *dd = ppd->dd;
8978
8979 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
8980 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
8981 dd_dev_info(dd,
8982 "%s: QSFP cable on fire\n",
8983 __func__);
8984
8985 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
8986 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
8987 dd_dev_info(dd,
8988 "%s: QSFP cable temperature too low\n",
8989 __func__);
8990
8991 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
8992 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
8993 dd_dev_info(dd,
8994 "%s: QSFP supply voltage too high\n",
8995 __func__);
8996
8997 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
8998 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
8999 dd_dev_info(dd,
9000 "%s: QSFP supply voltage too low\n",
9001 __func__);
9002
9003 /* Byte 2 is vendor specific */
9004
9005 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9006 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9007 dd_dev_info(dd,
9008 "%s: Cable RX channel 1/2 power too high\n",
9009 __func__);
9010
9011 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9012 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9013 dd_dev_info(dd,
9014 "%s: Cable RX channel 1/2 power too low\n",
9015 __func__);
9016
9017 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9018 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9019 dd_dev_info(dd,
9020 "%s: Cable RX channel 3/4 power too high\n",
9021 __func__);
9022
9023 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9024 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9025 dd_dev_info(dd,
9026 "%s: Cable RX channel 3/4 power too low\n",
9027 __func__);
9028
9029 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9030 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9031 dd_dev_info(dd,
9032 "%s: Cable TX channel 1/2 bias too high\n",
9033 __func__);
9034
9035 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9036 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9037 dd_dev_info(dd,
9038 "%s: Cable TX channel 1/2 bias too low\n",
9039 __func__);
9040
9041 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9042 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9043 dd_dev_info(dd,
9044 "%s: Cable TX channel 3/4 bias too high\n",
9045 __func__);
9046
9047 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9048 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9049 dd_dev_info(dd,
9050 "%s: Cable TX channel 3/4 bias too low\n",
9051 __func__);
9052
9053 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9054 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9055 dd_dev_info(dd,
9056 "%s: Cable TX channel 1/2 power too high\n",
9057 __func__);
9058
9059 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9060 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9061 dd_dev_info(dd,
9062 "%s: Cable TX channel 1/2 power too low\n",
9063 __func__);
9064
9065 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9066 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9067 dd_dev_info(dd,
9068 "%s: Cable TX channel 3/4 power too high\n",
9069 __func__);
9070
9071 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9072 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9073 dd_dev_info(dd,
9074 "%s: Cable TX channel 3/4 power too low\n",
9075 __func__);
9076
9077 /* Bytes 9-10 and 11-12 are reserved */
9078 /* Bytes 13-15 are vendor specific */
9079
9080 return 0;
9081}
9082
Mike Marciniszyn77241052015-07-30 15:17:43 -04009083/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009084void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009085{
9086 struct qsfp_data *qd;
9087 struct hfi1_pportdata *ppd;
9088 struct hfi1_devdata *dd;
9089
9090 qd = container_of(work, struct qsfp_data, qsfp_work);
9091 ppd = qd->ppd;
9092 dd = ppd->dd;
9093
9094 /* Sanity check */
9095 if (!qsfp_mod_present(ppd))
9096 return;
9097
9098 /*
9099	 * Turn the DC back on after the cable has been
9100 * re-inserted. Up until now, the DC has been in
9101 * reset to save power.
9102 */
9103 dc_start(dd);
9104
9105 if (qd->cache_refresh_required) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009106
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009107 set_qsfp_int_n(ppd, 0);
9108
9109 wait_for_qsfp_init(ppd);
9110
9111 /*
9112 * Allow INT_N to trigger the QSFP interrupt to watch
9113 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009114 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009115 set_qsfp_int_n(ppd, 1);
9116
9117 tune_serdes(ppd);
9118
9119 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009120 }
9121
9122 if (qd->check_interrupt_flags) {
9123 u8 qsfp_interrupt_status[16] = {0,};
9124
9125 if (qsfp_read(ppd, dd->hfi1_id, 6,
9126 &qsfp_interrupt_status[0], 16) != 16) {
9127 dd_dev_info(dd,
9128 "%s: Failed to read status of QSFP module\n",
9129 __func__);
9130 } else {
9131 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009132
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009133 handle_qsfp_error_conditions(
9134 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009135 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9136 ppd->qsfp_info.check_interrupt_flags = 0;
9137 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9138 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009139 }
9140 }
9141}
9142
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009143static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009144{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009145 struct hfi1_pportdata *ppd = dd->pport;
9146 u64 qsfp_mask, cce_int_mask;
9147 const int qsfp1_int_smask = QSFP1_INT % 64;
9148 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009149
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009150 /*
9151 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9152 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9153 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9154 * the index of the appropriate CSR in the CCEIntMask CSR array
9155 */
9156 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9157 (8 * (QSFP1_INT / 64)));
9158 if (dd->hfi1_id) {
9159 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9160 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9161 cce_int_mask);
9162 } else {
9163 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9164 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9165 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009166 }
9167
Mike Marciniszyn77241052015-07-30 15:17:43 -04009168 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9169 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009170 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9171 qsfp_mask);
9172 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9173 qsfp_mask);
9174
9175 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009176
9177 /* Handle active low nature of INT_N and MODPRST_N pins */
9178 if (qsfp_mod_present(ppd))
9179 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9180 write_csr(dd,
9181 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9182 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009183}
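/*
 * Worked example (illustrative; the interrupt source number is an
 * assumption, not the real QSFP1_INT value): if QSFP1_INT were source
 * 130, its mask would live in CSR CCE_INT_MASK + 8 * (130 / 64), i.e.
 * the third 64-bit mask register, and the bit within it would be
 * 130 % 64 = 2.
 */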
9184
Dean Luickbbdeb332015-12-01 15:38:15 -05009185/*
9186 * Do a one-time initialize of the LCB block.
9187 */
9188static void init_lcb(struct hfi1_devdata *dd)
9189{
9190 /* the DC has been reset earlier in the driver load */
9191
9192 /* set LCB for cclk loopback on the port */
9193 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9194 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9195 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9196 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9197 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9198 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9199 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9200}
9201
Mike Marciniszyn77241052015-07-30 15:17:43 -04009202int bringup_serdes(struct hfi1_pportdata *ppd)
9203{
9204 struct hfi1_devdata *dd = ppd->dd;
9205 u64 guid;
9206 int ret;
9207
9208 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9209 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9210
9211 guid = ppd->guid;
9212 if (!guid) {
9213 if (dd->base_guid)
9214 guid = dd->base_guid + ppd->port - 1;
9215 ppd->guid = guid;
9216 }
9217
Mike Marciniszyn77241052015-07-30 15:17:43 -04009218 /* Set linkinit_reason on power up per OPA spec */
9219 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9220
Dean Luickbbdeb332015-12-01 15:38:15 -05009221 /* one-time init of the LCB */
9222 init_lcb(dd);
9223
Mike Marciniszyn77241052015-07-30 15:17:43 -04009224 if (loopback) {
9225 ret = init_loopback(dd);
9226 if (ret < 0)
9227 return ret;
9228 }
9229
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009230	/*
9231	 * Tune the SerDes to a ballpark setting for optimal signal and bit
9232	 * error rate. This needs to be done before starting the link.
9233	 */
9234 tune_serdes(ppd);
9235
Mike Marciniszyn77241052015-07-30 15:17:43 -04009236 return start_link(ppd);
9237}
9238
9239void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9240{
9241 struct hfi1_devdata *dd = ppd->dd;
9242
9243 /*
9244	 * Shut down the link and keep it down. First clear the flag that says
9245	 * the driver wants to allow the link to be up (driver_link_ready).
9246 * Then make sure the link is not automatically restarted
9247 * (link_enabled). Cancel any pending restart. And finally
9248 * go offline.
9249 */
9250 ppd->driver_link_ready = 0;
9251 ppd->link_enabled = 0;
9252
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009253 ppd->offline_disabled_reason =
9254 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009255 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9256 OPA_LINKDOWN_REASON_SMA_DISABLED);
9257 set_link_state(ppd, HLS_DN_OFFLINE);
9258
9259 /* disable the port */
9260 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9261}
9262
9263static inline int init_cpu_counters(struct hfi1_devdata *dd)
9264{
9265 struct hfi1_pportdata *ppd;
9266 int i;
9267
9268 ppd = (struct hfi1_pportdata *)(dd + 1);
9269 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009270 ppd->ibport_data.rvp.rc_acks = NULL;
9271 ppd->ibport_data.rvp.rc_qacks = NULL;
9272 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9273 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9274 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9275 if (!ppd->ibport_data.rvp.rc_acks ||
9276 !ppd->ibport_data.rvp.rc_delayed_comp ||
9277 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009278 return -ENOMEM;
9279 }
9280
9281 return 0;
9282}
9283
9284static const char * const pt_names[] = {
9285 "expected",
9286 "eager",
9287 "invalid"
9288};
9289
9290static const char *pt_name(u32 type)
9291{
9292 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9293}
9294
9295/*
9296 * index is the index into the receive array
9297 */
9298void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9299 u32 type, unsigned long pa, u16 order)
9300{
9301 u64 reg;
9302 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9303 (dd->kregbase + RCV_ARRAY));
9304
9305 if (!(dd->flags & HFI1_PRESENT))
9306 goto done;
9307
9308 if (type == PT_INVALID) {
9309 pa = 0;
9310 } else if (type > PT_INVALID) {
9311 dd_dev_err(dd,
9312 "unexpected receive array type %u for index %u, not handled\n",
9313 type, index);
9314 goto done;
9315 }
9316
9317 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9318 pt_name(type), index, pa, (unsigned long)order);
9319
9320#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9321 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9322 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9323 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9324 << RCV_ARRAY_RT_ADDR_SHIFT;
9325 writeq(reg, base + (index * 8));
9326
9327 if (type == PT_EAGER)
9328 /*
9329 * Eager entries are written one-by-one so we have to push them
9330 * after we write the entry.
9331 */
9332 flush_wc();
9333done:
9334 return;
9335}
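/*
 * Illustrative sketch of the RcvArray entry built above (the address,
 * order and index are made up; the field names are the ones used in
 * the function): for an eager buffer at physical address 0x1234000
 * with order 1 at index 10,
 *
 *	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 *	      | (1ull << RCV_ARRAY_RT_BUF_SIZE_SHIFT)
 *	      | ((0x1234000 >> RT_ADDR_SHIFT) << RCV_ARRAY_RT_ADDR_SHIFT);
 *	writeq(reg, base + 10 * 8);
 */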
9336
9337void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9338{
9339 struct hfi1_devdata *dd = rcd->dd;
9340 u32 i;
9341
9342 /* this could be optimized */
9343 for (i = rcd->eager_base; i < rcd->eager_base +
9344 rcd->egrbufs.alloced; i++)
9345 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9346
9347 for (i = rcd->expected_base;
9348 i < rcd->expected_base + rcd->expected_count; i++)
9349 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9350}
9351
9352int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9353 struct hfi1_ctxt_info *kinfo)
9354{
9355 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9356 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9357 return 0;
9358}
9359
9360struct hfi1_message_header *hfi1_get_msgheader(
9361 struct hfi1_devdata *dd, __le32 *rhf_addr)
9362{
9363 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9364
9365 return (struct hfi1_message_header *)
9366 (rhf_addr - dd->rhf_offset + offset);
9367}
9368
9369static const char * const ib_cfg_name_strings[] = {
9370 "HFI1_IB_CFG_LIDLMC",
9371 "HFI1_IB_CFG_LWID_DG_ENB",
9372 "HFI1_IB_CFG_LWID_ENB",
9373 "HFI1_IB_CFG_LWID",
9374 "HFI1_IB_CFG_SPD_ENB",
9375 "HFI1_IB_CFG_SPD",
9376 "HFI1_IB_CFG_RXPOL_ENB",
9377 "HFI1_IB_CFG_LREV_ENB",
9378 "HFI1_IB_CFG_LINKLATENCY",
9379 "HFI1_IB_CFG_HRTBT",
9380 "HFI1_IB_CFG_OP_VLS",
9381 "HFI1_IB_CFG_VL_HIGH_CAP",
9382 "HFI1_IB_CFG_VL_LOW_CAP",
9383 "HFI1_IB_CFG_OVERRUN_THRESH",
9384 "HFI1_IB_CFG_PHYERR_THRESH",
9385 "HFI1_IB_CFG_LINKDEFAULT",
9386 "HFI1_IB_CFG_PKEYS",
9387 "HFI1_IB_CFG_MTU",
9388 "HFI1_IB_CFG_LSTATE",
9389 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9390 "HFI1_IB_CFG_PMA_TICKS",
9391 "HFI1_IB_CFG_PORT"
9392};
9393
9394static const char *ib_cfg_name(int which)
9395{
9396 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9397 return "invalid";
9398 return ib_cfg_name_strings[which];
9399}
9400
9401int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9402{
9403 struct hfi1_devdata *dd = ppd->dd;
9404 int val = 0;
9405
9406 switch (which) {
9407 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9408 val = ppd->link_width_enabled;
9409 break;
9410 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9411 val = ppd->link_width_active;
9412 break;
9413 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9414 val = ppd->link_speed_enabled;
9415 break;
9416 case HFI1_IB_CFG_SPD: /* current Link speed */
9417 val = ppd->link_speed_active;
9418 break;
9419
9420 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9421 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9422 case HFI1_IB_CFG_LINKLATENCY:
9423 goto unimplemented;
9424
9425 case HFI1_IB_CFG_OP_VLS:
9426 val = ppd->vls_operational;
9427 break;
9428 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9429 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9430 break;
9431 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9432 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9433 break;
9434 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9435 val = ppd->overrun_threshold;
9436 break;
9437 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9438 val = ppd->phy_error_threshold;
9439 break;
9440 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9441 val = dd->link_default;
9442 break;
9443
9444 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9445 case HFI1_IB_CFG_PMA_TICKS:
9446 default:
9447unimplemented:
9448 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9449 dd_dev_info(
9450 dd,
9451 "%s: which %s: not implemented\n",
9452 __func__,
9453 ib_cfg_name(which));
9454 break;
9455 }
9456
9457 return val;
9458}
9459
9460/*
9461 * The largest MAD packet size.
9462 */
9463#define MAX_MAD_PACKET 2048
9464
9465/*
9466 * Return the maximum header bytes that can go on the _wire_
9467 * for this device. This count includes the ICRC which is
9468 * not part of the packet held in memory but it is appended
9469 * by the HW.
9470 * This is dependent on the device's receive header entry size.
9471 * HFI allows this to be set per-receive context, but the
9472 * driver presently enforces a global value.
9473 */
9474u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9475{
9476 /*
9477 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9478 * the Receive Header Entry Size minus the PBC (or RHF) size
9479 * plus one DW for the ICRC appended by HW.
9480 *
9481 * dd->rcd[0].rcvhdrqentsize is in DW.
9482 * We use rcd[0] as all context will have the same value. Also,
9483 * the first kernel context would have been allocated by now so
9484 * we are guaranteed a valid value.
9485 */
9486 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9487}
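/*
 * Worked example (the entry size here is an assumption, not
 * necessarily the configured value): with rcvhdrqentsize = 32 DWs the
 * largest on-the-wire header is (32 - 2 + 1) << 2 = 124 bytes.
 */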
9488
9489/*
9490 * Set Send Length
9491 * @ppd - per port data
9492 *
9493 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9494 * registers compare against LRH.PktLen, so use the max bytes included
9495 * in the LRH.
9496 *
9497 * This routine changes all VL values except VL15, which it maintains at
9498 * the same value.
9499 */
9500static void set_send_length(struct hfi1_pportdata *ppd)
9501{
9502 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009503 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9504 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009505 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9506 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9507 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9508 int i;
9509
9510 for (i = 0; i < ppd->vls_supported; i++) {
9511 if (dd->vld[i].mtu > maxvlmtu)
9512 maxvlmtu = dd->vld[i].mtu;
9513 if (i <= 3)
9514 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9515 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9516 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9517 else
9518 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9519 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9520 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9521 }
9522 write_csr(dd, SEND_LEN_CHECK0, len1);
9523 write_csr(dd, SEND_LEN_CHECK1, len2);
9524 /* adjust kernel credit return thresholds based on new MTUs */
9525 /* all kernel receive contexts have the same hdrqentsize */
9526 for (i = 0; i < ppd->vls_supported; i++) {
9527 sc_set_cr_threshold(dd->vld[i].sc,
9528 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9529 dd->rcd[0]->rcvhdrqentsize));
9530 }
9531 sc_set_cr_threshold(dd->vld[15].sc,
9532 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9533 dd->rcd[0]->rcvhdrqentsize));
9534
9535 /* Adjust maximum MTU for the port in DC */
9536 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9537 (ilog2(maxvlmtu >> 8) + 1);
9538 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9539 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9540 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9541 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9542 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9543}
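/*
 * Worked example (illustrative values): with an 8192-byte MTU on a VL
 * and 124 bytes of maximum header, that VL's length check is
 * programmed to (8192 + 124) >> 2 = 2079 DWs.  For the DC MTU cap, a
 * 4096-byte maximum MTU encodes as ilog2(4096 >> 8) + 1 = 5, while
 * 10240 uses the dedicated DCC_CFG_PORT_MTU_CAP_10240 encoding.
 */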
9544
9545static void set_lidlmc(struct hfi1_pportdata *ppd)
9546{
9547 int i;
9548 u64 sreg = 0;
9549 struct hfi1_devdata *dd = ppd->dd;
9550 u32 mask = ~((1U << ppd->lmc) - 1);
9551 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9552
9553 if (dd->hfi1_snoop.mode_flag)
9554 dd_dev_info(dd, "Set lid/lmc while snooping");
9555
9556 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9557 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9558 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9559 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9560 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9561 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9562 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9563
9564 /*
9565 * Iterate over all the send contexts and set their SLID check
9566 */
9567 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9568 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9569 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9570 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9571
9572 for (i = 0; i < dd->chip_send_contexts; i++) {
9573 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9574 i, (u32)sreg);
9575 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9576 }
9577
9578 /* Now we have to do the same thing for the sdma engines */
9579 sdma_update_lmc(dd, mask, ppd->lid);
9580}
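/*
 * Worked example (illustrative LID/LMC values): with LMC = 2 the mask
 * is ~((1 << 2) - 1) = 0xfffffffc, so a base LID of 0x1000 covers
 * SLIDs 0x1000-0x1003; each send context's SLID check is then
 * programmed with value (0x1000 & mask) and that mask.
 */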
9581
9582static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9583{
9584 unsigned long timeout;
9585 u32 curr_state;
9586
9587 timeout = jiffies + msecs_to_jiffies(msecs);
9588 while (1) {
9589 curr_state = read_physical_state(dd);
9590 if (curr_state == state)
9591 break;
9592 if (time_after(jiffies, timeout)) {
9593 dd_dev_err(dd,
9594 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9595 state, curr_state);
9596 return -ETIMEDOUT;
9597 }
9598 usleep_range(1950, 2050); /* sleep 2ms-ish */
9599 }
9600
9601 return 0;
9602}
9603
9604/*
9605 * Helper for set_link_state(). Do not call except from that routine.
9606 * Expects ppd->hls_mutex to be held.
9607 *
9608 * @rem_reason value to be sent to the neighbor
9609 *
9610 * LinkDownReasons only set if transition succeeds.
9611 */
9612static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9613{
9614 struct hfi1_devdata *dd = ppd->dd;
9615 u32 pstate, previous_state;
9616 u32 last_local_state;
9617 u32 last_remote_state;
9618 int ret;
9619 int do_transition;
9620 int do_wait;
9621
9622 previous_state = ppd->host_link_state;
9623 ppd->host_link_state = HLS_GOING_OFFLINE;
9624 pstate = read_physical_state(dd);
9625 if (pstate == PLS_OFFLINE) {
9626 do_transition = 0; /* in right state */
9627 do_wait = 0; /* ...no need to wait */
9628 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9629 do_transition = 0; /* in an offline transient state */
9630 do_wait = 1; /* ...wait for it to settle */
9631 } else {
9632 do_transition = 1; /* need to move to offline */
9633 do_wait = 1; /* ...will need to wait */
9634 }
9635
9636 if (do_transition) {
9637 ret = set_physical_link_state(dd,
9638 PLS_OFFLINE | (rem_reason << 8));
9639
9640 if (ret != HCMD_SUCCESS) {
9641 dd_dev_err(dd,
9642 "Failed to transition to Offline link state, return %d\n",
9643 ret);
9644 return -EINVAL;
9645 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009646 if (ppd->offline_disabled_reason ==
9647 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009648 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009649 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009650 }
9651
9652 if (do_wait) {
9653 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009654 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009655 if (ret < 0)
9656 return ret;
9657 }
9658
9659 /* make sure the logical state is also down */
9660 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9661
9662 /*
9663 * Now in charge of LCB - must be after the physical state is
9664 * offline.quiet and before host_link_state is changed.
9665 */
9666 set_host_lcb_access(dd);
9667 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9668 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9669
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009670 if (ppd->port_type == PORT_TYPE_QSFP &&
9671 ppd->qsfp_info.limiting_active &&
9672 qsfp_mod_present(ppd)) {
9673 set_qsfp_tx(ppd, 0);
9674 }
9675
Mike Marciniszyn77241052015-07-30 15:17:43 -04009676 /*
9677 * The LNI has a mandatory wait time after the physical state
9678 * moves to Offline.Quiet. The wait time may be different
9679 * depending on how the link went down. The 8051 firmware
9680 * will observe the needed wait time and only move to ready
9681 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009682 * is 6s, so wait that long and then at least 0.5s more for
9683 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009684 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009685 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009686 if (ret) {
9687 dd_dev_err(dd,
9688 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9689 /* state is really offline, so make it so */
9690 ppd->host_link_state = HLS_DN_OFFLINE;
9691 return ret;
9692 }
9693
9694 /*
9695 * The state is now offline and the 8051 is ready to accept host
9696 * requests.
9697 * - change our state
9698 * - notify others if we were previously in a linkup state
9699 */
9700 ppd->host_link_state = HLS_DN_OFFLINE;
9701 if (previous_state & HLS_UP) {
9702 /* went down while link was up */
9703 handle_linkup_change(dd, 0);
9704 } else if (previous_state
9705 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9706 /* went down while attempting link up */
9707 /* byte 1 of last_*_state is the failure reason */
9708 read_last_local_state(dd, &last_local_state);
9709 read_last_remote_state(dd, &last_remote_state);
9710 dd_dev_err(dd,
9711 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9712 last_local_state, last_remote_state);
9713 }
9714
9715 /* the active link width (downgrade) is 0 on link down */
9716 ppd->link_width_active = 0;
9717 ppd->link_width_downgrade_tx_active = 0;
9718 ppd->link_width_downgrade_rx_active = 0;
9719 ppd->current_egress_rate = 0;
9720 return 0;
9721}
9722
9723/* return the link state name */
9724static const char *link_state_name(u32 state)
9725{
9726 const char *name;
9727 int n = ilog2(state);
9728 static const char * const names[] = {
9729 [__HLS_UP_INIT_BP] = "INIT",
9730 [__HLS_UP_ARMED_BP] = "ARMED",
9731 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9732 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9733 [__HLS_DN_POLL_BP] = "POLL",
9734 [__HLS_DN_DISABLE_BP] = "DISABLE",
9735 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9736 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9737 [__HLS_GOING_UP_BP] = "GOING_UP",
9738 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9739 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9740 };
9741
9742 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9743 return name ? name : "unknown";
9744}
9745
9746/* return the link state reason name */
9747static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9748{
9749 if (state == HLS_UP_INIT) {
9750 switch (ppd->linkinit_reason) {
9751 case OPA_LINKINIT_REASON_LINKUP:
9752 return "(LINKUP)";
9753 case OPA_LINKINIT_REASON_FLAPPING:
9754 return "(FLAPPING)";
9755 case OPA_LINKINIT_OUTSIDE_POLICY:
9756 return "(OUTSIDE_POLICY)";
9757 case OPA_LINKINIT_QUARANTINED:
9758 return "(QUARANTINED)";
9759 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9760 return "(INSUFIC_CAPABILITY)";
9761 default:
9762 break;
9763 }
9764 }
9765 return "";
9766}
9767
9768/*
9769 * driver_physical_state - convert the driver's notion of a port's
9770 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9771 * Return -1 (converted to a u32) to indicate error.
9772 */
9773u32 driver_physical_state(struct hfi1_pportdata *ppd)
9774{
9775 switch (ppd->host_link_state) {
9776 case HLS_UP_INIT:
9777 case HLS_UP_ARMED:
9778 case HLS_UP_ACTIVE:
9779 return IB_PORTPHYSSTATE_LINKUP;
9780 case HLS_DN_POLL:
9781 return IB_PORTPHYSSTATE_POLLING;
9782 case HLS_DN_DISABLE:
9783 return IB_PORTPHYSSTATE_DISABLED;
9784 case HLS_DN_OFFLINE:
9785 return OPA_PORTPHYSSTATE_OFFLINE;
9786 case HLS_VERIFY_CAP:
9787 return IB_PORTPHYSSTATE_POLLING;
9788 case HLS_GOING_UP:
9789 return IB_PORTPHYSSTATE_POLLING;
9790 case HLS_GOING_OFFLINE:
9791 return OPA_PORTPHYSSTATE_OFFLINE;
9792 case HLS_LINK_COOLDOWN:
9793 return OPA_PORTPHYSSTATE_OFFLINE;
9794 case HLS_DN_DOWNDEF:
9795 default:
9796 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9797 ppd->host_link_state);
9798 return -1;
9799 }
9800}
9801
9802/*
9803 * driver_logical_state - convert the driver's notion of a port's
9804 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9805 * (converted to a u32) to indicate error.
9806 */
9807u32 driver_logical_state(struct hfi1_pportdata *ppd)
9808{
9809 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9810 return IB_PORT_DOWN;
9811
9812 switch (ppd->host_link_state & HLS_UP) {
9813 case HLS_UP_INIT:
9814 return IB_PORT_INIT;
9815 case HLS_UP_ARMED:
9816 return IB_PORT_ARMED;
9817 case HLS_UP_ACTIVE:
9818 return IB_PORT_ACTIVE;
9819 default:
9820 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9821 ppd->host_link_state);
9822 return -1;
9823 }
9824}
9825
9826void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9827 u8 neigh_reason, u8 rem_reason)
9828{
9829 if (ppd->local_link_down_reason.latest == 0 &&
9830 ppd->neigh_link_down_reason.latest == 0) {
9831 ppd->local_link_down_reason.latest = lcl_reason;
9832 ppd->neigh_link_down_reason.latest = neigh_reason;
9833 ppd->remote_link_down_reason = rem_reason;
9834 }
9835}
9836
9837/*
9838 * Change the physical and/or logical link state.
9839 *
9840 * Do not call this routine while inside an interrupt. It contains
9841 * calls to routines that can take multiple seconds to finish.
9842 *
9843 * Returns 0 on success, -errno on failure.
9844 */
9845int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9846{
9847 struct hfi1_devdata *dd = ppd->dd;
9848 struct ib_event event = {.device = NULL};
9849 int ret1, ret = 0;
9850 int was_up, is_down;
9851 int orig_new_state, poll_bounce;
9852
9853 mutex_lock(&ppd->hls_lock);
9854
9855 orig_new_state = state;
9856 if (state == HLS_DN_DOWNDEF)
9857 state = dd->link_default;
9858
9859 /* interpret poll -> poll as a link bounce */
9860 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9861 && state == HLS_DN_POLL;
9862
9863 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9864 link_state_name(ppd->host_link_state),
9865 link_state_name(orig_new_state),
9866 poll_bounce ? "(bounce) " : "",
9867 link_state_reason_name(ppd, state));
9868
9869 was_up = !!(ppd->host_link_state & HLS_UP);
9870
9871 /*
9872 * If we're going to a (HLS_*) link state that implies the logical
9873 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9874 * reset is_sm_config_started to 0.
9875 */
9876 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9877 ppd->is_sm_config_started = 0;
9878
9879 /*
9880 * Do nothing if the states match. Let a poll to poll link bounce
9881 * go through.
9882 */
9883 if (ppd->host_link_state == state && !poll_bounce)
9884 goto done;
9885
9886 switch (state) {
9887 case HLS_UP_INIT:
9888 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9889 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9890 /*
9891 * Quick link up jumps from polling to here.
9892 *
9893 * Whether in normal or loopback mode, the
9894 * simulator jumps from polling to link up.
9895 * Accept that here.
9896 */
9897 /* OK */;
9898 } else if (ppd->host_link_state != HLS_GOING_UP) {
9899 goto unexpected;
9900 }
9901
9902 ppd->host_link_state = HLS_UP_INIT;
9903 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9904 if (ret) {
9905 /* logical state didn't change, stay at going_up */
9906 ppd->host_link_state = HLS_GOING_UP;
9907 dd_dev_err(dd,
9908 "%s: logical state did not change to INIT\n",
9909 __func__);
9910 } else {
9911 /* clear old transient LINKINIT_REASON code */
9912 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9913 ppd->linkinit_reason =
9914 OPA_LINKINIT_REASON_LINKUP;
9915
9916 /* enable the port */
9917 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9918
9919 handle_linkup_change(dd, 1);
9920 }
9921 break;
9922 case HLS_UP_ARMED:
9923 if (ppd->host_link_state != HLS_UP_INIT)
9924 goto unexpected;
9925
9926 ppd->host_link_state = HLS_UP_ARMED;
9927 set_logical_state(dd, LSTATE_ARMED);
9928 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9929 if (ret) {
9930 /* logical state didn't change, stay at init */
9931 ppd->host_link_state = HLS_UP_INIT;
9932 dd_dev_err(dd,
9933 "%s: logical state did not change to ARMED\n",
9934 __func__);
9935 }
9936 /*
9937 * The simulator does not currently implement SMA messages,
9938 * so neighbor_normal is not set. Set it here when we first
9939 * move to Armed.
9940 */
9941 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9942 ppd->neighbor_normal = 1;
9943 break;
9944 case HLS_UP_ACTIVE:
9945 if (ppd->host_link_state != HLS_UP_ARMED)
9946 goto unexpected;
9947
9948 ppd->host_link_state = HLS_UP_ACTIVE;
9949 set_logical_state(dd, LSTATE_ACTIVE);
9950 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9951 if (ret) {
9952 /* logical state didn't change, stay at armed */
9953 ppd->host_link_state = HLS_UP_ARMED;
9954 dd_dev_err(dd,
9955 "%s: logical state did not change to ACTIVE\n",
9956 __func__);
9957 } else {
9958
9959 /* tell all engines to go running */
9960 sdma_all_running(dd);
9961
9962 /* Signal the IB layer that the port has gone active */
9963 event.device = &dd->verbs_dev.rdi.ibdev;
9964 event.element.port_num = ppd->port;
9965 event.event = IB_EVENT_PORT_ACTIVE;
9966 }
9967 break;
9968 case HLS_DN_POLL:
9969 if ((ppd->host_link_state == HLS_DN_DISABLE ||
9970 ppd->host_link_state == HLS_DN_OFFLINE) &&
9971 dd->dc_shutdown)
9972 dc_start(dd);
9973 /* Hand LED control to the DC */
9974 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
9975
9976 if (ppd->host_link_state != HLS_DN_OFFLINE) {
9977 u8 tmp = ppd->link_enabled;
9978
9979 ret = goto_offline(ppd, ppd->remote_link_down_reason);
9980 if (ret) {
9981 ppd->link_enabled = tmp;
9982 break;
9983 }
9984 ppd->remote_link_down_reason = 0;
9985
9986 if (ppd->driver_link_ready)
9987 ppd->link_enabled = 1;
9988 }
9989
9990 set_all_slowpath(ppd->dd);
9991 ret = set_local_link_attributes(ppd);
9992 if (ret)
9993 break;
9994
9995 ppd->port_error_action = 0;
9996 ppd->host_link_state = HLS_DN_POLL;
9997
9998 if (quick_linkup) {
9999 /* quick linkup does not go into polling */
10000 ret = do_quick_linkup(dd);
10001 } else {
10002 ret1 = set_physical_link_state(dd, PLS_POLLING);
10003 if (ret1 != HCMD_SUCCESS) {
10004 dd_dev_err(dd,
10005 "Failed to transition to Polling link state, return 0x%x\n",
10006 ret1);
10007 ret = -EINVAL;
10008 }
10009 }
10010 ppd->offline_disabled_reason =
10011 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10012 /*
10013 * If an error occurred above, go back to offline. The
10014 * caller may reschedule another attempt.
10015 */
10016 if (ret)
10017 goto_offline(ppd, 0);
10018 break;
10019 case HLS_DN_DISABLE:
10020 /* link is disabled */
10021 ppd->link_enabled = 0;
10022
10023 /* allow any state to transition to disabled */
10024
10025 /* must transition to offline first */
10026 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10027 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10028 if (ret)
10029 break;
10030 ppd->remote_link_down_reason = 0;
10031 }
10032
10033 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10034 if (ret1 != HCMD_SUCCESS) {
10035 dd_dev_err(dd,
10036 "Failed to transition to Disabled link state, return 0x%x\n",
10037 ret1);
10038 ret = -EINVAL;
10039 break;
10040 }
10041 ppd->host_link_state = HLS_DN_DISABLE;
10042 dc_shutdown(dd);
10043 break;
10044 case HLS_DN_OFFLINE:
10045 if (ppd->host_link_state == HLS_DN_DISABLE)
10046 dc_start(dd);
10047
10048 /* allow any state to transition to offline */
10049 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10050 if (!ret)
10051 ppd->remote_link_down_reason = 0;
10052 break;
10053 case HLS_VERIFY_CAP:
10054 if (ppd->host_link_state != HLS_DN_POLL)
10055 goto unexpected;
10056 ppd->host_link_state = HLS_VERIFY_CAP;
10057 break;
10058 case HLS_GOING_UP:
10059 if (ppd->host_link_state != HLS_VERIFY_CAP)
10060 goto unexpected;
10061
10062 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10063 if (ret1 != HCMD_SUCCESS) {
10064 dd_dev_err(dd,
10065 "Failed to transition to link up state, return 0x%x\n",
10066 ret1);
10067 ret = -EINVAL;
10068 break;
10069 }
10070 ppd->host_link_state = HLS_GOING_UP;
10071 break;
10072
10073 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10074 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10075 default:
10076 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10077 __func__, state);
10078 ret = -EINVAL;
10079 break;
10080 }
10081
10082 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10083 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10084
10085 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10086 ppd->neigh_link_down_reason.sma == 0) {
10087 ppd->local_link_down_reason.sma =
10088 ppd->local_link_down_reason.latest;
10089 ppd->neigh_link_down_reason.sma =
10090 ppd->neigh_link_down_reason.latest;
10091 }
10092
10093 goto done;
10094
10095unexpected:
10096 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10097 __func__, link_state_name(ppd->host_link_state),
10098 link_state_name(state));
10099 ret = -EINVAL;
10100
10101done:
10102 mutex_unlock(&ppd->hls_lock);
10103
10104 if (event.device)
10105 ib_dispatch_event(&event);
10106
10107 return ret;
10108}
10109
10110int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10111{
10112 u64 reg;
10113 int ret = 0;
10114
10115 switch (which) {
10116 case HFI1_IB_CFG_LIDLMC:
10117 set_lidlmc(ppd);
10118 break;
10119 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10120 /*
10121 * The VL Arbitrator high limit is sent in units of 4k
10122 * bytes, while HFI stores it in units of 64 bytes.
10123 */
10124 val *= 4096/64;
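/*
 * Example: an FM value of 2 (2 * 4096 = 8192 bytes) becomes
 * 2 * 64 = 128 units of 64 bytes written to the CSR below.
 */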
10125 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10126 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10127 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10128 break;
10129 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10130 /* HFI only supports POLL as the default link down state */
10131 if (val != HLS_DN_POLL)
10132 ret = -EINVAL;
10133 break;
10134 case HFI1_IB_CFG_OP_VLS:
10135 if (ppd->vls_operational != val) {
10136 ppd->vls_operational = val;
10137 if (!ppd->port)
10138 ret = -EINVAL;
10139 else
10140 ret = sdma_map_init(
10141 ppd->dd,
10142 ppd->port - 1,
10143 val,
10144 NULL);
10145 }
10146 break;
10147 /*
10148 * For link width, link width downgrade, and speed enable, always AND
10149 * the setting with what is actually supported. This has two benefits.
10150 * First, enabled can't have unsupported values, no matter what the
10151 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10152 * "fill in with your supported value" have all the bits in the
10153 * field set, so simply ANDing with supported has the desired result.
10154 */
10155 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10156 ppd->link_width_enabled = val & ppd->link_width_supported;
10157 break;
10158 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10159 ppd->link_width_downgrade_enabled =
10160 val & ppd->link_width_downgrade_supported;
10161 break;
10162 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10163 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10164 break;
10165 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10166 /*
10167 * HFI does not follow IB specs, save this value
10168 * so we can report it, if asked.
10169 */
10170 ppd->overrun_threshold = val;
10171 break;
10172 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10173 /*
10174 * HFI does not follow IB specs, save this value
10175 * so we can report it, if asked.
10176 */
10177 ppd->phy_error_threshold = val;
10178 break;
10179
10180 case HFI1_IB_CFG_MTU:
10181 set_send_length(ppd);
10182 break;
10183
10184 case HFI1_IB_CFG_PKEYS:
10185 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10186 set_partition_keys(ppd);
10187 break;
10188
10189 default:
10190 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10191 dd_dev_info(ppd->dd,
10192 "%s: which %s, val 0x%x: not implemented\n",
10193 __func__, ib_cfg_name(which), val);
10194 break;
10195 }
10196 return ret;
10197}
10198
10199/* begin functions related to vl arbitration table caching */
10200static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10201{
10202 int i;
10203
10204 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10205 VL_ARB_LOW_PRIO_TABLE_SIZE);
10206 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10207 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10208
10209 /*
10210 * Note that we always return values directly from the
10211 * 'vl_arb_cache' (and do no CSR reads) in response to a
10212 * 'Get(VLArbTable)'. This is obviously correct after a
10213 * 'Set(VLArbTable)', since the cache will then be up to
10214 * date. But it's also correct prior to any 'Set(VLArbTable)'
10215 * since then both the cache, and the relevant h/w registers
10216 * will be zeroed.
10217 */
10218
10219 for (i = 0; i < MAX_PRIO_TABLE; i++)
10220 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10221}
10222
10223/*
10224 * vl_arb_lock_cache
10225 *
10226 * All other vl_arb_* functions should be called only after locking
10227 * the cache.
10228 */
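/*
 * Typical usage, as in fm_get_table()/fm_set_table() below:
 *
 *	vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
 */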
10229static inline struct vl_arb_cache *
10230vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10231{
10232 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10233 return NULL;
10234 spin_lock(&ppd->vl_arb_cache[idx].lock);
10235 return &ppd->vl_arb_cache[idx];
10236}
10237
10238static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10239{
10240 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10241}
10242
10243static void vl_arb_get_cache(struct vl_arb_cache *cache,
10244 struct ib_vl_weight_elem *vl)
10245{
10246 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10247}
10248
10249static void vl_arb_set_cache(struct vl_arb_cache *cache,
10250 struct ib_vl_weight_elem *vl)
10251{
10252 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10253}
10254
10255static int vl_arb_match_cache(struct vl_arb_cache *cache,
10256 struct ib_vl_weight_elem *vl)
10257{
10258 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10259}
10260/* end functions related to vl arbitration table caching */
10261
10262static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10263 u32 size, struct ib_vl_weight_elem *vl)
10264{
10265 struct hfi1_devdata *dd = ppd->dd;
10266 u64 reg;
10267 unsigned int i, is_up = 0;
10268 int drain, ret = 0;
10269
10270 mutex_lock(&ppd->hls_lock);
10271
10272 if (ppd->host_link_state & HLS_UP)
10273 is_up = 1;
10274
10275 drain = !is_ax(dd) && is_up;
10276
10277 if (drain)
10278 /*
10279 * Before adjusting VL arbitration weights, empty per-VL
10280 * FIFOs, otherwise a packet whose VL weight is being
10281 * set to 0 could get stuck in a FIFO with no chance to
10282 * egress.
10283 */
10284 ret = stop_drain_data_vls(dd);
10285
10286 if (ret) {
10287 dd_dev_err(
10288 dd,
10289 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10290 __func__);
10291 goto err;
10292 }
10293
10294 for (i = 0; i < size; i++, vl++) {
10295 /*
10296 * NOTE: The low priority shift and mask are used here, but
10297 * they are the same for both the low and high registers.
10298 */
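/*
 * Each 8-byte table entry below packs one VL number and its
 * arbitration weight into a single CSR write.
 */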
10299 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10300 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10301 | (((u64)vl->weight
10302 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10303 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10304 write_csr(dd, target + (i * 8), reg);
10305 }
10306 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10307
10308 if (drain)
10309 open_fill_data_vls(dd); /* reopen all VLs */
10310
10311err:
10312 mutex_unlock(&ppd->hls_lock);
10313
10314 return ret;
10315}
10316
10317/*
10318 * Read one credit merge VL register.
10319 */
10320static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10321 struct vl_limit *vll)
10322{
10323 u64 reg = read_csr(dd, csr);
10324
10325 vll->dedicated = cpu_to_be16(
10326 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10327 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10328 vll->shared = cpu_to_be16(
10329 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10330 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10331}
10332
10333/*
10334 * Read the current credit merge limits.
10335 */
10336static int get_buffer_control(struct hfi1_devdata *dd,
10337 struct buffer_control *bc, u16 *overall_limit)
10338{
10339 u64 reg;
10340 int i;
10341
10342 /* not all entries are filled in */
10343 memset(bc, 0, sizeof(*bc));
10344
10345 /* OPA and HFI have a 1-1 mapping */
10346 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10347 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10348
10349 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10350 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10351
10352 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10353 bc->overall_shared_limit = cpu_to_be16(
10354 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10355 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10356 if (overall_limit)
10357 *overall_limit = (reg
10358 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10359 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10360 return sizeof(struct buffer_control);
10361}
10362
10363static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10364{
10365 u64 reg;
10366 int i;
10367
10368 /* each register contains 16 SC->VLnt mappings, 4 bits each */
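/*
 * Example: a byte value of 0x21 unpacks to VLnt 1 for the even SC
 * (low nibble) and VLnt 2 for the odd SC (high nibble).
 */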
10369 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10370 for (i = 0; i < sizeof(u64); i++) {
10371 u8 byte = *(((u8 *)&reg) + i);
10372
10373 dp->vlnt[2 * i] = byte & 0xf;
10374 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10375 }
10376
10377 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10378 for (i = 0; i < sizeof(u64); i++) {
10379 u8 byte = *(((u8 *)&reg) + i);
10380
10381 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10382 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10383 }
10384 return sizeof(struct sc2vlnt);
10385}
10386
10387static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10388 struct ib_vl_weight_elem *vl)
10389{
10390 unsigned int i;
10391
10392 for (i = 0; i < nelems; i++, vl++) {
10393 vl->vl = 0xf;
10394 vl->weight = 0;
10395 }
10396}
10397
10398static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10399{
10400 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10401 DC_SC_VL_VAL(15_0,
10402 0, dp->vlnt[0] & 0xf,
10403 1, dp->vlnt[1] & 0xf,
10404 2, dp->vlnt[2] & 0xf,
10405 3, dp->vlnt[3] & 0xf,
10406 4, dp->vlnt[4] & 0xf,
10407 5, dp->vlnt[5] & 0xf,
10408 6, dp->vlnt[6] & 0xf,
10409 7, dp->vlnt[7] & 0xf,
10410 8, dp->vlnt[8] & 0xf,
10411 9, dp->vlnt[9] & 0xf,
10412 10, dp->vlnt[10] & 0xf,
10413 11, dp->vlnt[11] & 0xf,
10414 12, dp->vlnt[12] & 0xf,
10415 13, dp->vlnt[13] & 0xf,
10416 14, dp->vlnt[14] & 0xf,
10417 15, dp->vlnt[15] & 0xf));
10418 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10419 DC_SC_VL_VAL(31_16,
10420 16, dp->vlnt[16] & 0xf,
10421 17, dp->vlnt[17] & 0xf,
10422 18, dp->vlnt[18] & 0xf,
10423 19, dp->vlnt[19] & 0xf,
10424 20, dp->vlnt[20] & 0xf,
10425 21, dp->vlnt[21] & 0xf,
10426 22, dp->vlnt[22] & 0xf,
10427 23, dp->vlnt[23] & 0xf,
10428 24, dp->vlnt[24] & 0xf,
10429 25, dp->vlnt[25] & 0xf,
10430 26, dp->vlnt[26] & 0xf,
10431 27, dp->vlnt[27] & 0xf,
10432 28, dp->vlnt[28] & 0xf,
10433 29, dp->vlnt[29] & 0xf,
10434 30, dp->vlnt[30] & 0xf,
10435 31, dp->vlnt[31] & 0xf));
10436}
10437
10438static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10439 u16 limit)
10440{
10441 if (limit != 0)
10442 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10443 what, (int)limit, idx);
10444}
10445
10446/* change only the shared limit portion of SendCmGLobalCredit */
10447static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10448{
10449 u64 reg;
10450
10451 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10452 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10453 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10454 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10455}
10456
10457/* change only the total credit limit portion of SendCmGLobalCredit */
10458static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10459{
10460 u64 reg;
10461
10462 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10463 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10464 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10465 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10466}
10467
10468/* set the given per-VL shared limit */
10469static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10470{
10471 u64 reg;
10472 u32 addr;
10473
10474 if (vl < TXE_NUM_DATA_VL)
10475 addr = SEND_CM_CREDIT_VL + (8 * vl);
10476 else
10477 addr = SEND_CM_CREDIT_VL15;
10478
10479 reg = read_csr(dd, addr);
10480 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10481 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10482 write_csr(dd, addr, reg);
10483}
10484
10485/* set the given per-VL dedicated limit */
10486static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10487{
10488 u64 reg;
10489 u32 addr;
10490
10491 if (vl < TXE_NUM_DATA_VL)
10492 addr = SEND_CM_CREDIT_VL + (8 * vl);
10493 else
10494 addr = SEND_CM_CREDIT_VL15;
10495
10496 reg = read_csr(dd, addr);
10497 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10498 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10499 write_csr(dd, addr, reg);
10500}
10501
10502/* spin until the given per-VL status mask bits clear */
10503static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10504 const char *which)
10505{
10506 unsigned long timeout;
10507 u64 reg;
10508
10509 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10510 while (1) {
10511 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10512
10513 if (reg == 0)
10514 return; /* success */
10515 if (time_after(jiffies, timeout))
10516 break; /* timed out */
10517 udelay(1);
10518 }
10519
10520 dd_dev_err(dd,
10521 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10522 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10523 /*
10524 * If this occurs, it is likely there was a credit loss on the link.
10525 * The only recovery from that is a link bounce.
10526 */
10527 dd_dev_err(dd,
10528 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10529}
10530
10531/*
10532 * The number of credits on the VLs may be changed while everything
10533 * is "live", but the following algorithm must be followed due to
10534 * how the hardware is actually implemented. In particular,
10535 * Return_Credit_Status[] is the only correct status check.
10536 *
10537 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10538 * set Global_Shared_Credit_Limit = 0
10539 * use_all_vl = 1
10540 * mask0 = all VLs that are changing either dedicated or shared limits
10541 * set Shared_Limit[mask0] = 0
10542 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10543 * if (changing any dedicated limit)
10544 * mask1 = all VLs that are lowering dedicated limits
10545 * lower Dedicated_Limit[mask1]
10546 * spin until Return_Credit_Status[mask1] == 0
10547 * raise Dedicated_Limits
10548 * raise Shared_Limits
10549 * raise Global_Shared_Credit_Limit
10550 *
10551 * lower = if the new limit is lower, set the limit to the new value
10552 * raise = if the new limit is higher than the current value (may be changed
10553 * earlier in the algorithm), set the limit to the new value
10554 */
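/*
 * Example walk-through of the algorithm above: lowering VL0's dedicated
 * limit while changing VL1's shared limit puts both VLs in mask0 (their
 * shared limits are zeroed first), but only VL0 in mask1 (its dedicated
 * limit is lowered and drained before any limits are raised).
 */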
10555static int set_buffer_control(struct hfi1_devdata *dd,
10556 struct buffer_control *new_bc)
10557{
10558 u64 changing_mask, ld_mask, stat_mask;
10559 int change_count;
10560 int i, use_all_mask;
10561 int this_shared_changing;
10562 /*
10563 * A0: add the variable any_shared_limit_changing below and in the
10564 * algorithm above. If removing A0 support, it can be removed.
10565 */
10566 int any_shared_limit_changing;
10567 struct buffer_control cur_bc;
10568 u8 changing[OPA_MAX_VLS];
10569 u8 lowering_dedicated[OPA_MAX_VLS];
10570 u16 cur_total;
10571 u32 new_total = 0;
10572 const u64 all_mask =
10573 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10574 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10575 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10576 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10577 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10578 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10579 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10580 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10581 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10582
10583#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10584#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10585
10586
10587 /* find the new total credits, do sanity check on unused VLs */
10588 for (i = 0; i < OPA_MAX_VLS; i++) {
10589 if (valid_vl(i)) {
10590 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10591 continue;
10592 }
10593 nonzero_msg(dd, i, "dedicated",
10594 be16_to_cpu(new_bc->vl[i].dedicated));
10595 nonzero_msg(dd, i, "shared",
10596 be16_to_cpu(new_bc->vl[i].shared));
10597 new_bc->vl[i].dedicated = 0;
10598 new_bc->vl[i].shared = 0;
10599 }
10600 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10601
10602 /* fetch the current values */
10603 get_buffer_control(dd, &cur_bc, &cur_total);
10604
10605 /*
10606 * Create the masks we will use.
10607 */
10608 memset(changing, 0, sizeof(changing));
10609 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10610 /* NOTE: Assumes that the individual VL bits are adjacent and in
10611 increasing order */
10612 stat_mask =
10613 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10614 changing_mask = 0;
10615 ld_mask = 0;
10616 change_count = 0;
10617 any_shared_limit_changing = 0;
10618 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10619 if (!valid_vl(i))
10620 continue;
10621 this_shared_changing = new_bc->vl[i].shared
10622 != cur_bc.vl[i].shared;
10623 if (this_shared_changing)
10624 any_shared_limit_changing = 1;
10625 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10626 || this_shared_changing) {
10627 changing[i] = 1;
10628 changing_mask |= stat_mask;
10629 change_count++;
10630 }
10631 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10632 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10633 lowering_dedicated[i] = 1;
10634 ld_mask |= stat_mask;
10635 }
10636 }
10637
10638 /* bracket the credit change with a total adjustment */
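/*
 * The global total is raised here, before any per-VL changes, and is
 * only lowered at the very end of this routine, after the per-VL
 * limits have been adjusted.
 */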
10639 if (new_total > cur_total)
10640 set_global_limit(dd, new_total);
10641
10642 /*
10643 * Start the credit change algorithm.
10644 */
10645 use_all_mask = 0;
10646 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10647 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10648 (is_ax(dd) && any_shared_limit_changing)) {
10649 set_global_shared(dd, 0);
10650 cur_bc.overall_shared_limit = 0;
10651 use_all_mask = 1;
10652 }
10653
10654 for (i = 0; i < NUM_USABLE_VLS; i++) {
10655 if (!valid_vl(i))
10656 continue;
10657
10658 if (changing[i]) {
10659 set_vl_shared(dd, i, 0);
10660 cur_bc.vl[i].shared = 0;
10661 }
10662 }
10663
10664 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10665 "shared");
10666
10667 if (change_count > 0) {
10668 for (i = 0; i < NUM_USABLE_VLS; i++) {
10669 if (!valid_vl(i))
10670 continue;
10671
10672 if (lowering_dedicated[i]) {
10673 set_vl_dedicated(dd, i,
10674 be16_to_cpu(new_bc->vl[i].dedicated));
10675 cur_bc.vl[i].dedicated =
10676 new_bc->vl[i].dedicated;
10677 }
10678 }
10679
10680 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10681
10682 /* now raise all dedicated that are going up */
10683 for (i = 0; i < NUM_USABLE_VLS; i++) {
10684 if (!valid_vl(i))
10685 continue;
10686
10687 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10688 be16_to_cpu(cur_bc.vl[i].dedicated))
10689 set_vl_dedicated(dd, i,
10690 be16_to_cpu(new_bc->vl[i].dedicated));
10691 }
10692 }
10693
10694 /* next raise all shared that are going up */
10695 for (i = 0; i < NUM_USABLE_VLS; i++) {
10696 if (!valid_vl(i))
10697 continue;
10698
10699 if (be16_to_cpu(new_bc->vl[i].shared) >
10700 be16_to_cpu(cur_bc.vl[i].shared))
10701 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10702 }
10703
10704 /* finally raise the global shared */
10705 if (be16_to_cpu(new_bc->overall_shared_limit) >
10706 be16_to_cpu(cur_bc.overall_shared_limit))
10707 set_global_shared(dd,
10708 be16_to_cpu(new_bc->overall_shared_limit));
10709
10710 /* bracket the credit change with a total adjustment */
10711 if (new_total < cur_total)
10712 set_global_limit(dd, new_total);
10713 return 0;
10714}
10715
10716/*
10717 * Read the given fabric manager table. Return the size of the
10718 * table (in bytes) on success, and a negative error code on
10719 * failure.
10720 */
10721int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10722
10723{
10724 int size;
10725 struct vl_arb_cache *vlc;
10726
10727 switch (which) {
10728 case FM_TBL_VL_HIGH_ARB:
10729 size = 256;
10730 /*
10731 * OPA specifies 128 elements (of 2 bytes each), though
10732 * HFI supports only 16 elements in h/w.
10733 */
10734 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10735 vl_arb_get_cache(vlc, t);
10736 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10737 break;
10738 case FM_TBL_VL_LOW_ARB:
10739 size = 256;
10740 /*
10741 * OPA specifies 128 elements (of 2 bytes each), though
10742 * HFI supports only 16 elements in h/w.
10743 */
10744 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10745 vl_arb_get_cache(vlc, t);
10746 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10747 break;
10748 case FM_TBL_BUFFER_CONTROL:
10749 size = get_buffer_control(ppd->dd, t, NULL);
10750 break;
10751 case FM_TBL_SC2VLNT:
10752 size = get_sc2vlnt(ppd->dd, t);
10753 break;
10754 case FM_TBL_VL_PREEMPT_ELEMS:
10755 size = 256;
10756 /* OPA specifies 128 elements, of 2 bytes each */
10757 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10758 break;
10759 case FM_TBL_VL_PREEMPT_MATRIX:
10760 size = 256;
10761 /*
10762 * OPA specifies that this is the same size as the VL
10763 * arbitration tables (i.e., 256 bytes).
10764 */
10765 break;
10766 default:
10767 return -EINVAL;
10768 }
10769 return size;
10770}
10771
10772/*
10773 * Write the given fabric manager table.
10774 */
10775int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10776{
10777 int ret = 0;
10778 struct vl_arb_cache *vlc;
10779
10780 switch (which) {
10781 case FM_TBL_VL_HIGH_ARB:
10782 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10783 if (vl_arb_match_cache(vlc, t)) {
10784 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10785 break;
10786 }
10787 vl_arb_set_cache(vlc, t);
10788 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10789 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10790 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10791 break;
10792 case FM_TBL_VL_LOW_ARB:
10793 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10794 if (vl_arb_match_cache(vlc, t)) {
10795 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10796 break;
10797 }
10798 vl_arb_set_cache(vlc, t);
10799 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10800 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10801 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10802 break;
10803 case FM_TBL_BUFFER_CONTROL:
10804 ret = set_buffer_control(ppd->dd, t);
10805 break;
10806 case FM_TBL_SC2VLNT:
10807 set_sc2vlnt(ppd->dd, t);
10808 break;
10809 default:
10810 ret = -EINVAL;
10811 }
10812 return ret;
10813}
10814
10815/*
10816 * Disable all data VLs.
10817 *
10818 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10819 */
10820static int disable_data_vls(struct hfi1_devdata *dd)
10821{
10822 if (is_ax(dd))
10823 return 1;
10824
10825 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10826
10827 return 0;
10828}
10829
10830/*
10831 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10832 * Just re-enables all data VLs (the "fill" part happens
10833 * automatically - the name was chosen for symmetry with
10834 * stop_drain_data_vls()).
10835 *
10836 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10837 */
10838int open_fill_data_vls(struct hfi1_devdata *dd)
10839{
10840 if (is_ax(dd))
10841 return 1;
10842
10843 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10844
10845 return 0;
10846}
10847
10848/*
10849 * drain_data_vls() - assumes that disable_data_vls() has been called,
10850 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
10851 * engines to drop to 0.
10852 */
10853static void drain_data_vls(struct hfi1_devdata *dd)
10854{
10855 sc_wait(dd);
10856 sdma_wait(dd);
10857 pause_for_credit_return(dd);
10858}
10859
10860/*
10861 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10862 *
10863 * Use open_fill_data_vls() to resume using data VLs. This pair is
10864 * meant to be used like this:
10865 *
10866 * stop_drain_data_vls(dd);
10867 * // do things with per-VL resources
10868 * open_fill_data_vls(dd);
10869 */
10870int stop_drain_data_vls(struct hfi1_devdata *dd)
10871{
10872 int ret;
10873
10874 ret = disable_data_vls(dd);
10875 if (ret == 0)
10876 drain_data_vls(dd);
10877
10878 return ret;
10879}
10880
10881/*
10882 * Convert a nanosecond time to a cclock count. No matter how slow
10883 * the cclock, a non-zero ns will always have a non-zero result.
10884 */
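/*
 * Worked example (the period value is illustrative only): with a cclock
 * period of 1250 ps, ns_to_cclock(dd, 100) returns (100 * 1000) / 1250 = 80.
 */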
10885u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10886{
10887 u32 cclocks;
10888
10889 if (dd->icode == ICODE_FPGA_EMULATION)
10890 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10891 else /* simulation pretends to be ASIC */
10892 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10893 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10894 cclocks = 1;
10895 return cclocks;
10896}
10897
10898/*
10899 * Convert a cclock count to nanoseconds. No matter how slow
10900 * the cclock, a non-zero cclocks value will always have a non-zero result.
10901 */
10902u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10903{
10904 u32 ns;
10905
10906 if (dd->icode == ICODE_FPGA_EMULATION)
10907 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10908 else /* simulation pretends to be ASIC */
10909 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10910 if (cclocks && !ns)
10911 ns = 1;
10912 return ns;
10913}
10914
10915/*
10916 * Dynamically adjust the receive interrupt timeout for a context based on
10917 * incoming packet rate.
10918 *
10919 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10920 */
10921static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10922{
10923 struct hfi1_devdata *dd = rcd->dd;
10924 u32 timeout = rcd->rcvavail_timeout;
10925
10926 /*
10927 * This algorithm doubles or halves the timeout depending on whether
10928 * the number of packets received in this interrupt was less than, or
10929 * greater than or equal to, the interrupt count.
10930 *
10931 * The calculations below do not allow a steady state to be achieved.
10932 * Only at the endpoints is it possible to have an unchanging
10933 * timeout.
10934 */
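/*
 * Example: with rcv_intr_count == 8, an interrupt that covered only
 * 3 packets halves the timeout, while one that covered 10 packets
 * doubles it (clamped to dd->rcv_intr_timeout_csr).
 */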
10935 if (npkts < rcv_intr_count) {
10936 /*
10937 * Not enough packets arrived before the timeout, adjust
10938 * timeout downward.
10939 */
10940 if (timeout < 2) /* already at minimum? */
10941 return;
10942 timeout >>= 1;
10943 } else {
10944 /*
10945 * More than enough packets arrived before the timeout, adjust
10946 * timeout upward.
10947 */
10948 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10949 return;
10950 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10951 }
10952
10953 rcd->rcvavail_timeout = timeout;
10954 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10955 been verified to be in range */
10956 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10957 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10958}
10959
10960void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
10961 u32 intr_adjust, u32 npkts)
10962{
10963 struct hfi1_devdata *dd = rcd->dd;
10964 u64 reg;
10965 u32 ctxt = rcd->ctxt;
10966
10967 /*
10968 * Need to write timeout register before updating RcvHdrHead to ensure
10969 * that a new value is used when the HW decides to restart counting.
10970 */
10971 if (intr_adjust)
10972 adjust_rcv_timeout(rcd, npkts);
10973 if (updegr) {
10974 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
10975 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
10976 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
10977 }
10978 mmiowb();
10979 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
10980 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
10981 << RCV_HDR_HEAD_HEAD_SHIFT);
10982 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
10983 mmiowb();
10984}
10985
10986u32 hdrqempty(struct hfi1_ctxtdata *rcd)
10987{
10988 u32 head, tail;
10989
10990 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
10991 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
10992
10993 if (rcd->rcvhdrtail_kvaddr)
10994 tail = get_rcvhdrtail(rcd);
10995 else
10996 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
10997
10998 return head == tail;
10999}
11000
11001/*
11002 * Context Control and Receive Array encoding for buffer size:
11003 * 0x0 invalid
11004 * 0x1 4 KB
11005 * 0x2 8 KB
11006 * 0x3 16 KB
11007 * 0x4 32 KB
11008 * 0x5 64 KB
11009 * 0x6 128 KB
11010 * 0x7 256 KB
11011 * 0x8 512 KB (Receive Array only)
11012 * 0x9 1 MB (Receive Array only)
11013 * 0xa 2 MB (Receive Array only)
11014 *
11015 * 0xB-0xF - reserved (Receive Array only)
11016 *
11017 *
11018 * This routine assumes that the value has already been sanity checked.
11019 */
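/*
 * Example: encoded_size(64 * 1024) returns 0x5; any size not in the table
 * falls back to 0x1 (4 KB).
 */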
11020static u32 encoded_size(u32 size)
11021{
11022 switch (size) {
11023 case 4*1024: return 0x1;
11024 case 8*1024: return 0x2;
11025 case 16*1024: return 0x3;
11026 case 32*1024: return 0x4;
11027 case 64*1024: return 0x5;
11028 case 128*1024: return 0x6;
11029 case 256*1024: return 0x7;
11030 case 512*1024: return 0x8;
11031 case 1*1024*1024: return 0x9;
11032 case 2*1024*1024: return 0xa;
11033 }
11034 return 0x1; /* if invalid, go with the minimum size */
11035}
11036
11037void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11038{
11039 struct hfi1_ctxtdata *rcd;
11040 u64 rcvctrl, reg;
11041 int did_enable = 0;
11042
11043 rcd = dd->rcd[ctxt];
11044 if (!rcd)
11045 return;
11046
11047 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11048
11049 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11050 /* if the context is already enabled, don't do the extra steps */
11051 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11052 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11053 /* reset the tail and hdr addresses, and sequence count */
11054 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11055 rcd->rcvhdrq_phys);
11056 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11057 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11058 rcd->rcvhdrqtailaddr_phys);
11059 rcd->seq_cnt = 1;
11060
11061 /* reset the cached receive header queue head value */
11062 rcd->head = 0;
11063
11064 /*
11065 * Zero the receive header queue so we don't get false
11066 * positives when checking the sequence number. The
11067 * sequence numbers could land exactly on the same spot.
11068 * E.g. a rcd restart before the receive header wrapped.
11069 */
11070 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11071
11072 /* starting timeout */
11073 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11074
11075 /* enable the context */
11076 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11077
11078 /* clean the egr buffer size first */
11079 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11080 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11081 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11082 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11083
11084 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11085 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11086 did_enable = 1;
11087
11088 /* zero RcvEgrIndexHead */
11089 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11090
11091 /* set eager count and base index */
11092 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11093 & RCV_EGR_CTRL_EGR_CNT_MASK)
11094 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11095 (((rcd->eager_base >> RCV_SHIFT)
11096 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11097 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11098 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11099
11100 /*
11101 * Set TID (expected) count and base index.
11102 * rcd->expected_count is set to individual RcvArray entries,
11103 * not pairs, and the CSR takes a pair-count in groups of
11104 * four, so divide by 8.
11105 */
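/*
 * Example: 2048 individual RcvArray entries become 2048 / 8 = 256
 * in the CSR field written below.
 */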
11106 reg = (((rcd->expected_count >> RCV_SHIFT)
11107 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11108 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11109 (((rcd->expected_base >> RCV_SHIFT)
11110 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11111 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11112 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11113 if (ctxt == HFI1_CTRL_CTXT)
11114 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11115 }
11116 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11117 write_csr(dd, RCV_VL15, 0);
11118 /*
11119 * When receive context is being disabled turn on tail
11120 * update with a dummy tail address and then disable
11121 * receive context.
11122 */
11123 if (dd->rcvhdrtail_dummy_physaddr) {
11124 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11125 dd->rcvhdrtail_dummy_physaddr);
11126 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11127 }
11128
11129 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11130 }
11131 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11132 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11133 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11134 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11135 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11136 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11137 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11138 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11139 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11140 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11141 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11142 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11143 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11144 /* In one-packet-per-eager mode, the size comes from
11145 the RcvArray entry. */
11146 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11147 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11148 }
11149 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11150 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11151 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11152 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11153 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11154 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11155 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11156 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11157 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11158 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11159 rcd->rcvctrl = rcvctrl;
11160 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11161 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11162
11163 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11164 if (did_enable
11165 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11166 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11167 if (reg != 0) {
11168 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11169 ctxt, reg);
11170 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11171 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11172 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11173 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11174 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11175 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11176 ctxt, reg, reg == 0 ? "not" : "still");
11177 }
11178 }
11179
11180 if (did_enable) {
11181 /*
11182 * The interrupt timeout and count must be set after
11183 * the context is enabled to take effect.
11184 */
11185 /* set interrupt timeout */
11186 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11187 (u64)rcd->rcvavail_timeout <<
11188 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11189
11190 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11191 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11192 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11193 }
11194
11195 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11196 /*
11197 * If the context has been disabled and the Tail Update has
11198 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11199 * so it doesn't contain an address that is invalid.
11200 */
11201 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11202 dd->rcvhdrtail_dummy_physaddr);
11203}
11204
11205u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11206 u64 **cntrp)
11207{
11208 int ret;
11209 u64 val = 0;
11210
11211 if (namep) {
11212 ret = dd->cntrnameslen;
11213 if (pos != 0) {
11214 dd_dev_err(dd, "read_cntrs does not support indexing");
11215 return 0;
11216 }
11217 *namep = dd->cntrnames;
11218 } else {
11219 const struct cntr_entry *entry;
11220 int i, j;
11221
11222 ret = (dd->ndevcntrs) * sizeof(u64);
11223 if (pos != 0) {
11224 dd_dev_err(dd, "read_cntrs does not support indexing");
11225 return 0;
11226 }
11227
11228 /* Get the start of the block of counters */
11229 *cntrp = dd->cntrs;
11230
11231 /*
11232 * Now go and fill in each counter in the block.
11233 */
11234 for (i = 0; i < DEV_CNTR_LAST; i++) {
11235 entry = &dev_cntrs[i];
11236 hfi1_cdbg(CNTR, "reading %s", entry->name);
11237 if (entry->flags & CNTR_DISABLED) {
11238 /* Nothing */
11239 hfi1_cdbg(CNTR, "\tDisabled\n");
11240 } else {
11241 if (entry->flags & CNTR_VL) {
11242 hfi1_cdbg(CNTR, "\tPer VL\n");
11243 for (j = 0; j < C_VL_COUNT; j++) {
11244 val = entry->rw_cntr(entry,
11245 dd, j,
11246 CNTR_MODE_R,
11247 0);
11248 hfi1_cdbg(
11249 CNTR,
11250 "\t\tRead 0x%llx for %d\n",
11251 val, j);
11252 dd->cntrs[entry->offset + j] =
11253 val;
11254 }
11255 } else if (entry->flags & CNTR_SDMA) {
11256 hfi1_cdbg(CNTR,
11257 "\t Per SDMA Engine\n");
11258 for (j = 0; j < dd->chip_sdma_engines;
11259 j++) {
11260 val =
11261 entry->rw_cntr(entry, dd, j,
11262 CNTR_MODE_R, 0);
11263 hfi1_cdbg(CNTR,
11264 "\t\tRead 0x%llx for %d\n",
11265 val, j);
11266 dd->cntrs[entry->offset + j] =
11267 val;
11268 }
11269 } else {
11270 val = entry->rw_cntr(entry, dd,
11271 CNTR_INVALID_VL,
11272 CNTR_MODE_R, 0);
11273 dd->cntrs[entry->offset] = val;
11274 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11275 }
11276 }
11277 }
11278 }
11279 return ret;
11280}
11281
11282/*
11283 * Used by sysfs to create files for hfi stats to read
11284 */
11285u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11286 char **namep, u64 **cntrp)
11287{
11288 int ret;
11289 u64 val = 0;
11290
11291 if (namep) {
11292 ret = dd->portcntrnameslen;
11293 if (pos != 0) {
11294 dd_dev_err(dd, "index not supported");
11295 return 0;
11296 }
11297 *namep = dd->portcntrnames;
11298 } else {
11299 const struct cntr_entry *entry;
11300 struct hfi1_pportdata *ppd;
11301 int i, j;
11302
11303 ret = (dd->nportcntrs) * sizeof(u64);
11304 if (pos != 0) {
11305 dd_dev_err(dd, "indexing not supported");
11306 return 0;
11307 }
11308 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11309 *cntrp = ppd->cntrs;
11310
11311 for (i = 0; i < PORT_CNTR_LAST; i++) {
11312 entry = &port_cntrs[i];
11313 hfi1_cdbg(CNTR, "reading %s", entry->name);
11314 if (entry->flags & CNTR_DISABLED) {
11315 /* Nothing */
11316 hfi1_cdbg(CNTR, "\tDisabled\n");
11317 continue;
11318 }
11319
11320 if (entry->flags & CNTR_VL) {
11321 hfi1_cdbg(CNTR, "\tPer VL");
11322 for (j = 0; j < C_VL_COUNT; j++) {
11323 val = entry->rw_cntr(entry, ppd, j,
11324 CNTR_MODE_R,
11325 0);
11326 hfi1_cdbg(
11327 CNTR,
11328 "\t\tRead 0x%llx for %d",
11329 val, j);
11330 ppd->cntrs[entry->offset + j] = val;
11331 }
11332 } else {
11333 val = entry->rw_cntr(entry, ppd,
11334 CNTR_INVALID_VL,
11335 CNTR_MODE_R,
11336 0);
11337 ppd->cntrs[entry->offset] = val;
11338 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11339 }
11340 }
11341 }
11342 return ret;
11343}
11344
11345static void free_cntrs(struct hfi1_devdata *dd)
11346{
11347 struct hfi1_pportdata *ppd;
11348 int i;
11349
11350 if (dd->synth_stats_timer.data)
11351 del_timer_sync(&dd->synth_stats_timer);
11352 dd->synth_stats_timer.data = 0;
11353 ppd = (struct hfi1_pportdata *)(dd + 1);
11354 for (i = 0; i < dd->num_pports; i++, ppd++) {
11355 kfree(ppd->cntrs);
11356 kfree(ppd->scntrs);
11357 free_percpu(ppd->ibport_data.rvp.rc_acks);
11358 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11359 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11360 ppd->cntrs = NULL;
11361 ppd->scntrs = NULL;
11362 ppd->ibport_data.rvp.rc_acks = NULL;
11363 ppd->ibport_data.rvp.rc_qacks = NULL;
11364 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11365 }
11366 kfree(dd->portcntrnames);
11367 dd->portcntrnames = NULL;
11368 kfree(dd->cntrs);
11369 dd->cntrs = NULL;
11370 kfree(dd->scntrs);
11371 dd->scntrs = NULL;
11372 kfree(dd->cntrnames);
11373 dd->cntrnames = NULL;
11374}
11375
11376#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11377#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11378
11379static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11380 u64 *psval, void *context, int vl)
11381{
11382 u64 val;
11383 u64 sval = *psval;
11384
11385 if (entry->flags & CNTR_DISABLED) {
11386 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11387 return 0;
11388 }
11389
11390 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11391
11392 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11393
11394 /* If its a synthetic counter there is more work we need to do */
11395 if (entry->flags & CNTR_SYNTH) {
11396 if (sval == CNTR_MAX) {
11397 /* No need to read already saturated */
11398 return CNTR_MAX;
11399 }
11400
11401 if (entry->flags & CNTR_32BIT) {
11402 /* 32bit counters can wrap multiple times */
11403 u64 upper = sval >> 32;
11404 u64 lower = (sval << 32) >> 32;
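/*
 * The upper 32 bits of the saved value count prior hardware wraps;
 * the lower 32 bits are the last raw hardware reading, used to
 * detect a new wrap below.
 */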
11405
11406 if (lower > val) { /* hw wrapped */
11407 if (upper == CNTR_32BIT_MAX)
11408 val = CNTR_MAX;
11409 else
11410 upper++;
11411 }
11412
11413 if (val != CNTR_MAX)
11414 val = (upper << 32) | val;
11415
11416 } else {
11417 /* If we rolled we are saturated */
11418 if ((val < sval) || (val > CNTR_MAX))
11419 val = CNTR_MAX;
11420 }
11421 }
11422
11423 *psval = val;
11424
11425 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11426
11427 return val;
11428}
11429
11430static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11431 struct cntr_entry *entry,
11432 u64 *psval, void *context, int vl, u64 data)
11433{
11434 u64 val;
11435
11436 if (entry->flags & CNTR_DISABLED) {
11437 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11438 return 0;
11439 }
11440
11441 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11442
11443 if (entry->flags & CNTR_SYNTH) {
11444 *psval = data;
11445 if (entry->flags & CNTR_32BIT) {
11446 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11447 (data << 32) >> 32);
11448 val = data; /* return the full 64bit value */
11449 } else {
11450 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11451 data);
11452 }
11453 } else {
11454 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11455 }
11456
11457 *psval = val;
11458
11459 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11460
11461 return val;
11462}
11463
11464u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11465{
11466 struct cntr_entry *entry;
11467 u64 *sval;
11468
11469 entry = &dev_cntrs[index];
11470 sval = dd->scntrs + entry->offset;
11471
11472 if (vl != CNTR_INVALID_VL)
11473 sval += vl;
11474
11475 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11476}
11477
11478u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11479{
11480 struct cntr_entry *entry;
11481 u64 *sval;
11482
11483 entry = &dev_cntrs[index];
11484 sval = dd->scntrs + entry->offset;
11485
11486 if (vl != CNTR_INVALID_VL)
11487 sval += vl;
11488
11489 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11490}
11491
11492u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11493{
11494 struct cntr_entry *entry;
11495 u64 *sval;
11496
11497 entry = &port_cntrs[index];
11498 sval = ppd->scntrs + entry->offset;
11499
11500 if (vl != CNTR_INVALID_VL)
11501 sval += vl;
11502
11503 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11504 (index <= C_RCV_HDR_OVF_LAST)) {
11505 /* We do not want to bother for disabled contexts */
11506 return 0;
11507 }
11508
11509 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11510}
11511
11512u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11513{
11514 struct cntr_entry *entry;
11515 u64 *sval;
11516
11517 entry = &port_cntrs[index];
11518 sval = ppd->scntrs + entry->offset;
11519
11520 if (vl != CNTR_INVALID_VL)
11521 sval += vl;
11522
11523 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11524 (index <= C_RCV_HDR_OVF_LAST)) {
11525 /* We do not want to bother for disabled contexts */
11526 return 0;
11527 }
11528
11529 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11530}
11531
11532static void update_synth_timer(unsigned long opaque)
11533{
11534 u64 cur_tx;
11535 u64 cur_rx;
11536 u64 total_flits;
11537 u8 update = 0;
11538 int i, j, vl;
11539 struct hfi1_pportdata *ppd;
11540 struct cntr_entry *entry;
11541
11542 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11543
11544 /*
11545 * Rather than keep beating on the CSRs pick a minimal set that we can
11546 * check to watch for potential roll over. We can do this by looking at
11547 * the number of flits sent/recv. If the total flits exceeds 32bits then
11548 * we have to iterate all the counters and update.
11549 */
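/*
 * C_DC_RCV_FLITS and C_DC_XMIT_FLITS serve as the "tripwire" counters
 * read below to decide whether a full update is needed.
 */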
11550 entry = &dev_cntrs[C_DC_RCV_FLITS];
11551 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11552
11553 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11554 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11555
11556 hfi1_cdbg(
11557 CNTR,
11558 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11559 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11560
11561 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11562 /*
11563 * May not be strictly necessary to update but it won't hurt and
11564 * simplifies the logic here.
11565 */
11566 update = 1;
11567 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11568 dd->unit);
11569 } else {
11570 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11571 hfi1_cdbg(CNTR,
11572 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11573 total_flits, (u64)CNTR_32BIT_MAX);
11574 if (total_flits >= CNTR_32BIT_MAX) {
11575 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11576 dd->unit);
11577 update = 1;
11578 }
11579 }
11580
11581 if (update) {
11582 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11583 for (i = 0; i < DEV_CNTR_LAST; i++) {
11584 entry = &dev_cntrs[i];
11585 if (entry->flags & CNTR_VL) {
11586 for (vl = 0; vl < C_VL_COUNT; vl++)
11587 read_dev_cntr(dd, i, vl);
11588 } else {
11589 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11590 }
11591 }
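 /* per-port structures are laid out immediately after the device structure */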
11592 ppd = (struct hfi1_pportdata *)(dd + 1);
11593 for (i = 0; i < dd->num_pports; i++, ppd++) {
11594 for (j = 0; j < PORT_CNTR_LAST; j++) {
11595 entry = &port_cntrs[j];
11596 if (entry->flags & CNTR_VL) {
11597 for (vl = 0; vl < C_VL_COUNT; vl++)
11598 read_port_cntr(ppd, j, vl);
11599 } else {
11600 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11601 }
11602 }
11603 }
11604
11605 /*
11606 * We want the value in the register. The goal is to keep track
11607 * of the number of "ticks" not the counter value. In other
11608 * words if the register rolls we want to notice it and go ahead
11609 * and force an update.
11610 */
11611 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11612 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11613 CNTR_MODE_R, 0);
11614
11615 entry = &dev_cntrs[C_DC_RCV_FLITS];
11616 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11617 CNTR_MODE_R, 0);
11618
11619 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11620 dd->unit, dd->last_tx, dd->last_rx);
11621
11622 } else {
11623 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11624 }
11625
11626 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11627}
11628
11629#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11630static int init_cntrs(struct hfi1_devdata *dd)
11631{
Dean Luickc024c552016-01-11 18:30:57 -050011632 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011633 size_t sz;
11634 char *p;
11635 char name[C_MAX_NAME];
11636 struct hfi1_pportdata *ppd;
11637
11638 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011639 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11640 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011641
11642 /***********************/
11643 /* per device counters */
11644 /***********************/
11645
11646 /* size names and determine how many we have */
11647 dd->ndevcntrs = 0;
11648 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011649
11650 for (i = 0; i < DEV_CNTR_LAST; i++) {
11651 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11652 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11653 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11654 continue;
11655 }
11656
11657 if (dev_cntrs[i].flags & CNTR_VL) {
11658 hfi1_dbg_early("\tProcessing VL cntr\n");
Dean Luickc024c552016-01-11 18:30:57 -050011659 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011660 for (j = 0; j < C_VL_COUNT; j++) {
11661 memset(name, '\0', C_MAX_NAME);
11662 snprintf(name, C_MAX_NAME, "%s%d",
11663 dev_cntrs[i].name,
11664 vl_from_idx(j));
11665 sz += strlen(name);
11666 sz++;
11667 hfi1_dbg_early("\t\t%s\n", name);
11668 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011669 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011670 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11671 hfi1_dbg_early(
11672 "\tProcessing per SDE counters chip engines %u\n",
11673 dd->chip_sdma_engines);
Dean Luickc024c552016-01-11 18:30:57 -050011674 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011675 for (j = 0; j < dd->chip_sdma_engines; j++) {
11676 memset(name, '\0', C_MAX_NAME);
11677 snprintf(name, C_MAX_NAME, "%s%d",
11678 dev_cntrs[i].name, j);
11679 sz += strlen(name);
11680 sz++;
11681 hfi1_dbg_early("\t\t%s\n", name);
11682 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011683 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011684 } else {
11685 /* +1 for newline */
11686 sz += strlen(dev_cntrs[i].name) + 1;
Dean Luickc024c552016-01-11 18:30:57 -050011687 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011688 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011689 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11690 }
11691 }
11692
11693 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011694 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011695 if (!dd->cntrs)
11696 goto bail;
11697
Dean Luickc024c552016-01-11 18:30:57 -050011698 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011699 if (!dd->scntrs)
11700 goto bail;
11701
11702
11703 /* allocate space for the counter names */
11704 dd->cntrnameslen = sz;
11705 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11706 if (!dd->cntrnames)
11707 goto bail;
11708
11709 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011710 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011711 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11712 /* Nothing */
11713 } else {
11714 if (dev_cntrs[i].flags & CNTR_VL) {
11715 for (j = 0; j < C_VL_COUNT; j++) {
11716 memset(name, '\0', C_MAX_NAME);
11717 snprintf(name, C_MAX_NAME, "%s%d",
11718 dev_cntrs[i].name,
11719 vl_from_idx(j));
11720 memcpy(p, name, strlen(name));
11721 p += strlen(name);
11722 *p++ = '\n';
11723 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011724 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11725 for (j = 0; j < TXE_NUM_SDMA_ENGINES;
11726 j++) {
11727 memset(name, '\0', C_MAX_NAME);
11728 snprintf(name, C_MAX_NAME, "%s%d",
11729 dev_cntrs[i].name, j);
11730 memcpy(p, name, strlen(name));
11731 p += strlen(name);
11732 *p++ = '\n';
11733 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011734 } else {
11735 memcpy(p, dev_cntrs[i].name,
11736 strlen(dev_cntrs[i].name));
11737 p += strlen(dev_cntrs[i].name);
11738 *p++ = '\n';
11739 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011740 }
11741 }
11742
11743 /*********************/
11744 /* per port counters */
11745 /*********************/
11746
11747 /*
11748 * Go through the counters for the overflows and disable the ones we
11749 * don't need. This varies based on platform so we need to do it
11750 * dynamically here.
11751 */
11752 rcv_ctxts = dd->num_rcv_contexts;
11753 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11754 i <= C_RCV_HDR_OVF_LAST; i++) {
11755 port_cntrs[i].flags |= CNTR_DISABLED;
11756 }
11757
11758 /* size port counter names and determine how many we have */
11759 sz = 0;
11760 dd->nportcntrs = 0;
11761 for (i = 0; i < PORT_CNTR_LAST; i++) {
11762 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11763 if (port_cntrs[i].flags & CNTR_DISABLED) {
11764 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11765 continue;
11766 }
11767
11768 if (port_cntrs[i].flags & CNTR_VL) {
11769 hfi1_dbg_early("\tProcessing VL cntr\n");
11770 port_cntrs[i].offset = dd->nportcntrs;
11771 for (j = 0; j < C_VL_COUNT; j++) {
11772 memset(name, '\0', C_MAX_NAME);
11773 snprintf(name, C_MAX_NAME, "%s%d",
11774 port_cntrs[i].name,
11775 vl_from_idx(j));
11776 sz += strlen(name);
11777 sz++;
11778 hfi1_dbg_early("\t\t%s\n", name);
11779 dd->nportcntrs++;
11780 }
11781 } else {
11782 /* +1 for newline */
11783 sz += strlen(port_cntrs[i].name) + 1;
11784 port_cntrs[i].offset = dd->nportcntrs;
11785 dd->nportcntrs++;
11786 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11787 }
11788 }
11789
11790 /* allocate space for the counter names */
11791 dd->portcntrnameslen = sz;
11792 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11793 if (!dd->portcntrnames)
11794 goto bail;
11795
11796 /* fill in port cntr names */
11797 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11798 if (port_cntrs[i].flags & CNTR_DISABLED)
11799 continue;
11800
11801 if (port_cntrs[i].flags & CNTR_VL) {
11802 for (j = 0; j < C_VL_COUNT; j++) {
11803 memset(name, '\0', C_MAX_NAME);
11804 snprintf(name, C_MAX_NAME, "%s%d",
11805 port_cntrs[i].name,
11806 vl_from_idx(j));
11807 memcpy(p, name, strlen(name));
11808 p += strlen(name);
11809 *p++ = '\n';
11810 }
11811 } else {
11812 memcpy(p, port_cntrs[i].name,
11813 strlen(port_cntrs[i].name));
11814 p += strlen(port_cntrs[i].name);
11815 *p++ = '\n';
11816 }
11817 }
11818
11819 /* allocate per port storage for counter values */
11820 ppd = (struct hfi1_pportdata *)(dd + 1);
11821 for (i = 0; i < dd->num_pports; i++, ppd++) {
11822 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11823 if (!ppd->cntrs)
11824 goto bail;
11825
11826 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11827 if (!ppd->scntrs)
11828 goto bail;
11829 }
11830
11831 /* CPU counters need to be allocated and zeroed */
11832 if (init_cpu_counters(dd))
11833 goto bail;
11834
11835 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11836 return 0;
11837bail:
11838 free_cntrs(dd);
11839 return -ENOMEM;
11840}
11841
11842
11843static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11844{
11845 switch (chip_lstate) {
11846 default:
11847 dd_dev_err(dd,
11848 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11849 chip_lstate);
11850 /* fall through */
11851 case LSTATE_DOWN:
11852 return IB_PORT_DOWN;
11853 case LSTATE_INIT:
11854 return IB_PORT_INIT;
11855 case LSTATE_ARMED:
11856 return IB_PORT_ARMED;
11857 case LSTATE_ACTIVE:
11858 return IB_PORT_ACTIVE;
11859 }
11860}
11861
11862u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11863{
11864 /* look at the HFI meta-states only */
11865 switch (chip_pstate & 0xf0) {
11866 default:
11867 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11868 chip_pstate);
11869 /* fall through */
11870 case PLS_DISABLED:
11871 return IB_PORTPHYSSTATE_DISABLED;
11872 case PLS_OFFLINE:
11873 return OPA_PORTPHYSSTATE_OFFLINE;
11874 case PLS_POLLING:
11875 return IB_PORTPHYSSTATE_POLLING;
11876 case PLS_CONFIGPHY:
11877 return IB_PORTPHYSSTATE_TRAINING;
11878 case PLS_LINKUP:
11879 return IB_PORTPHYSSTATE_LINKUP;
11880 case PLS_PHYTEST:
11881 return IB_PORTPHYSSTATE_PHY_TEST;
11882 }
11883}
11884
11885/* return the OPA port logical state name */
11886const char *opa_lstate_name(u32 lstate)
11887{
11888 static const char * const port_logical_names[] = {
11889 "PORT_NOP",
11890 "PORT_DOWN",
11891 "PORT_INIT",
11892 "PORT_ARMED",
11893 "PORT_ACTIVE",
11894 "PORT_ACTIVE_DEFER",
11895 };
11896 if (lstate < ARRAY_SIZE(port_logical_names))
11897 return port_logical_names[lstate];
11898 return "unknown";
11899}
11900
11901/* return the OPA port physical state name */
11902const char *opa_pstate_name(u32 pstate)
11903{
11904 static const char * const port_physical_names[] = {
11905 "PHYS_NOP",
11906 "reserved1",
11907 "PHYS_POLL",
11908 "PHYS_DISABLED",
11909 "PHYS_TRAINING",
11910 "PHYS_LINKUP",
11911 "PHYS_LINK_ERR_RECOVER",
11912 "PHYS_PHY_TEST",
11913 "reserved8",
11914 "PHYS_OFFLINE",
11915 "PHYS_GANGED",
11916 "PHYS_TEST",
11917 };
11918 if (pstate < ARRAY_SIZE(port_physical_names))
11919 return port_physical_names[pstate];
11920 return "unknown";
11921}
11922
11923/*
11924 * Read the hardware link state and set the driver's cached value of it.
11925 * Return the (new) current value.
11926 */
11927u32 get_logical_state(struct hfi1_pportdata *ppd)
11928{
11929 u32 new_state;
11930
11931 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11932 if (new_state != ppd->lstate) {
11933 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11934 opa_lstate_name(new_state), new_state);
11935 ppd->lstate = new_state;
11936 }
11937 /*
11938 * Set port status flags in the page mapped into userspace
11939 * memory. Do it here to ensure a reliable state - this is
11940 * the only function called by all state handling code.
11941 * Always set the flags because the cached value
11942 * might have been changed explicitly outside of this
11943 * function.
11944 */
11945 if (ppd->statusp) {
11946 switch (ppd->lstate) {
11947 case IB_PORT_DOWN:
11948 case IB_PORT_INIT:
11949 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11950 HFI1_STATUS_IB_READY);
11951 break;
11952 case IB_PORT_ARMED:
11953 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11954 break;
11955 case IB_PORT_ACTIVE:
11956 *ppd->statusp |= HFI1_STATUS_IB_READY;
11957 break;
11958 }
11959 }
11960 return ppd->lstate;
11961}
11962
11963/**
11964 * wait_logical_linkstate - wait for an IB link state change to occur
11965 * @ppd: port device
11966 * @state: the state to wait for
11967 * @msecs: the number of milliseconds to wait
11968 *
11969 * Wait up to msecs milliseconds for IB link state change to occur.
11970 * For now, take the easy polling route.
11971 * Returns 0 if state reached, otherwise -ETIMEDOUT.
11972 */
11973static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
11974 int msecs)
11975{
11976 unsigned long timeout;
11977
11978 timeout = jiffies + msecs_to_jiffies(msecs);
11979 while (1) {
11980 if (get_logical_state(ppd) == state)
11981 return 0;
11982 if (time_after(jiffies, timeout))
11983 break;
11984 msleep(20);
11985 }
11986 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
11987
11988 return -ETIMEDOUT;
11989}
11990
11991u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
11992{
11993 static u32 remembered_state = 0xff;
11994 u32 pstate;
11995 u32 ib_pstate;
11996
11997 pstate = read_physical_state(ppd->dd);
11998 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
11999 if (remembered_state != ib_pstate) {
12000 dd_dev_info(ppd->dd,
12001 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12002 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12003 pstate);
12004 remembered_state = ib_pstate;
12005 }
12006 return ib_pstate;
12007}
12008
12009/*
12010 * Read/modify/write ASIC_QSFP register bits as selected by mask
12011 * data: 0 or 1 in the positions depending on what needs to be written
12012 * dir: 0 for read, 1 for write
12013 * mask: select by setting
12014 * I2CCLK (bit 0)
12015 * I2CDATA (bit 1)
12016 */
12017u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12018 u32 mask)
12019{
12020 u64 qsfp_oe, target_oe;
12021
12022 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12023 if (mask) {
12024 /* We are writing register bits, so lock access */
12025 dir &= mask;
12026 data &= mask;
12027
12028 qsfp_oe = read_csr(dd, target_oe);
12029 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12030 write_csr(dd, target_oe, qsfp_oe);
12031 }
12032 /* We are exclusively reading bits here, but it is unlikely
12033 * we'll get valid data when we set the direction of the pin
12034 * in the same call, so the caller should call this function again
12035 * to get valid data.
12036 */
12037 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12038}
12039
12040#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12041(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12042
12043#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12044(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12045
12046int hfi1_init_ctxt(struct send_context *sc)
12047{
12048 if (sc != NULL) {
12049 struct hfi1_devdata *dd = sc->dd;
12050 u64 reg;
12051 u8 set = (sc->type == SC_USER ?
12052 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12053 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12054 reg = read_kctxt_csr(dd, sc->hw_context,
12055 SEND_CTXT_CHECK_ENABLE);
12056 if (set)
12057 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12058 else
12059 SET_STATIC_RATE_CONTROL_SMASK(reg);
12060 write_kctxt_csr(dd, sc->hw_context,
12061 SEND_CTXT_CHECK_ENABLE, reg);
12062 }
12063 return 0;
12064}
12065
12066int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12067{
12068 int ret = 0;
12069 u64 reg;
12070
12071 if (dd->icode != ICODE_RTL_SILICON) {
12072 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12073 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12074 __func__);
12075 return -EINVAL;
12076 }
12077 reg = read_csr(dd, ASIC_STS_THERM);
12078 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12079 ASIC_STS_THERM_CURR_TEMP_MASK);
12080 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12081 ASIC_STS_THERM_LO_TEMP_MASK);
12082 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12083 ASIC_STS_THERM_HI_TEMP_MASK);
12084 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12085 ASIC_STS_THERM_CRIT_TEMP_MASK);
12086 /* triggers is a 3-bit value - 1 bit per trigger. */
12087 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12088
12089 return ret;
12090}
12091
12092/* ========================================================================= */
12093
12094/*
12095 * Enable/disable chip from delivering interrupts.
12096 */
12097void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12098{
12099 int i;
12100
12101 /*
12102 * In HFI, the mask needs to be 1 to allow interrupts.
12103 */
12104 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012105 /* enable all interrupts */
12106 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12107 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12108
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012109 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012110 } else {
12111 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12112 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12113 }
12114}
12115
12116/*
12117 * Clear all interrupt sources on the chip.
12118 */
12119static void clear_all_interrupts(struct hfi1_devdata *dd)
12120{
12121 int i;
12122
12123 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12124 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12125
12126 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12127 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12128 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12129 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12130 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12131 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12132 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12133 for (i = 0; i < dd->chip_send_contexts; i++)
12134 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12135 for (i = 0; i < dd->chip_sdma_engines; i++)
12136 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12137
12138 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12139 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12140 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12141}
12142
12143/* Move to pcie.c? */
12144static void disable_intx(struct pci_dev *pdev)
12145{
12146 pci_intx(pdev, 0);
12147}
12148
12149static void clean_up_interrupts(struct hfi1_devdata *dd)
12150{
12151 int i;
12152
12153 /* remove irqs - must happen before disabling/turning off */
12154 if (dd->num_msix_entries) {
12155 /* MSI-X */
12156 struct hfi1_msix_entry *me = dd->msix_entries;
12157
12158 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12159 if (me->arg == NULL) /* => no irq, no affinity */
12160 break;
12161 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12162 NULL);
12163 free_irq(me->msix.vector, me->arg);
12164 }
12165 } else {
12166 /* INTx */
12167 if (dd->requested_intx_irq) {
12168 free_irq(dd->pcidev->irq, dd);
12169 dd->requested_intx_irq = 0;
12170 }
12171 }
12172
12173 /* turn off interrupts */
12174 if (dd->num_msix_entries) {
12175 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012176 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012177 } else {
12178 /* INTx */
12179 disable_intx(dd->pcidev);
12180 }
12181
12182 /* clean structures */
12183 for (i = 0; i < dd->num_msix_entries; i++)
12184 free_cpumask_var(dd->msix_entries[i].mask);
12185 kfree(dd->msix_entries);
12186 dd->msix_entries = NULL;
12187 dd->num_msix_entries = 0;
12188}
12189
12190/*
12191 * Remap the interrupt source from the general handler to the given MSI-X
12192 * interrupt.
12193 */
12194static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12195{
12196 u64 reg;
12197 int m, n;
12198
12199 /* clear from the handled mask of the general interrupt */
12200 m = isrc / 64;
12201 n = isrc % 64;
12202 dd->gi_mask[m] &= ~((u64)1 << n);
12203
12204 /* direct the chip source to the given MSI-X interrupt */
12205 m = isrc / 8;
12206 n = isrc % 8;
12207 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12208 reg &= ~((u64)0xff << (8*n));
12209 reg |= ((u64)msix_intr & 0xff) << (8*n);
12210 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12211}
12212
12213static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12214 int engine, int msix_intr)
12215{
12216 /*
12217 * SDMA engine interrupt sources are grouped by type, rather than
12218 * engine. Per-engine interrupts are as follows:
12219 * SDMA
12220 * SDMAProgress
12221 * SDMAIdle
12222 */
12223 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12224 msix_intr);
12225 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12226 msix_intr);
12227 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12228 msix_intr);
12229}
12230
Mike Marciniszyn77241052015-07-30 15:17:43 -040012231static int request_intx_irq(struct hfi1_devdata *dd)
12232{
12233 int ret;
12234
Jubin John98050712015-11-16 21:59:27 -050012235 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12236 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012237 ret = request_irq(dd->pcidev->irq, general_interrupt,
12238 IRQF_SHARED, dd->intx_name, dd);
12239 if (ret)
12240 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12241 ret);
12242 else
12243 dd->requested_intx_irq = 1;
12244 return ret;
12245}
12246
12247static int request_msix_irqs(struct hfi1_devdata *dd)
12248{
12249 const struct cpumask *local_mask;
12250 cpumask_var_t def, rcv;
12251 bool def_ret, rcv_ret;
12252 int first_general, last_general;
12253 int first_sdma, last_sdma;
12254 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012255 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012256 int rcv_cpu, sdma_cpu;
12257 int i, ret = 0, possible;
12258 int ht;
12259
12260 /* calculate the ranges we are going to use */
12261 first_general = 0;
12262 first_sdma = last_general = first_general + 1;
12263 first_rx = last_sdma = first_sdma + dd->num_sdma;
12264 last_rx = first_rx + dd->n_krcv_queues;
12265
12266 /*
12267 * Interrupt affinity.
12268 *
12269 * Non-RcvAvail interrupts get a default mask that starts as the
12270 * possible CPUs with hyperthread siblings removed and each
12271 * RcvAvail CPU removed.
12272 *
12273 * RcvAvail interrupts get node-relative CPU 1, wrapping back
12274 * to node-relative 1 as necessary.
12275 *
12276 */
12277 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12278 /* if first cpu is invalid, use NUMA 0 */
12279 if (cpumask_first(local_mask) >= nr_cpu_ids)
12280 local_mask = topology_core_cpumask(0);
12281
12282 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12283 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12284 if (!def_ret || !rcv_ret)
12285 goto bail;
12286 /* use local mask as default */
12287 cpumask_copy(def, local_mask);
12288 possible = cpumask_weight(def);
12289 /* remove hyperthread siblings from the default mask */
12290 ht = cpumask_weight(
12291 topology_sibling_cpumask(cpumask_first(local_mask)));
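 /* assumes the sibling hardware threads occupy the upper half of the local CPU id range */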
12292 for (i = possible/ht; i < possible; i++)
12293 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012294 /* def now has full cores on chosen node */
12295 first_cpu = cpumask_first(def);
12296 if (nr_cpu_ids >= first_cpu)
12297 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012298 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012299
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012300 /* One context is reserved as control context */
12301 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012302 cpumask_clear_cpu(curr_cpu, def);
12303 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012304 curr_cpu = cpumask_next(curr_cpu, def);
12305 if (curr_cpu >= nr_cpu_ids)
12306 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012307 }
12308 /* def mask has non-rcv, rcv has recv mask */
12309 rcv_cpu = cpumask_first(rcv);
12310 sdma_cpu = cpumask_first(def);
12311
12312 /*
12313 * Sanity check - the code expects all SDMA chip source
12314 * interrupts to be in the same CSR, starting at bit 0. Verify
12315 * that this is true by checking the bit location of the start.
12316 */
12317 BUILD_BUG_ON(IS_SDMA_START % 64);
12318
12319 for (i = 0; i < dd->num_msix_entries; i++) {
12320 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12321 const char *err_info;
12322 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012323 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012324 void *arg;
12325 int idx;
12326 struct hfi1_ctxtdata *rcd = NULL;
12327 struct sdma_engine *sde = NULL;
12328
12329 /* obtain the arguments to request_irq */
12330 if (first_general <= i && i < last_general) {
12331 idx = i - first_general;
12332 handler = general_interrupt;
12333 arg = dd;
12334 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012335 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012336 err_info = "general";
12337 } else if (first_sdma <= i && i < last_sdma) {
12338 idx = i - first_sdma;
12339 sde = &dd->per_sdma[idx];
12340 handler = sdma_interrupt;
12341 arg = sde;
12342 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012343 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012344 err_info = "sdma";
12345 remap_sdma_interrupts(dd, idx, i);
12346 } else if (first_rx <= i && i < last_rx) {
12347 idx = i - first_rx;
12348 rcd = dd->rcd[idx];
12349 /* no interrupt if no rcd */
12350 if (!rcd)
12351 continue;
12352 /*
12353 * Set the interrupt register and mask for this
12354 * context's interrupt.
12355 */
12356 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12357 rcd->imask = ((u64)1) <<
12358 ((IS_RCVAVAIL_START+idx) % 64);
12359 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012360 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012361 arg = rcd;
12362 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012363 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012364 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012365 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012366 } else {
12367 /* not in our expected range - complain, then
12368 ignore it */
12369 dd_dev_err(dd,
12370 "Unexpected extra MSI-X interrupt %d\n", i);
12371 continue;
12372 }
12373 /* no argument, no interrupt */
12374 if (arg == NULL)
12375 continue;
12376 /* make sure the name is terminated */
12377 me->name[sizeof(me->name)-1] = 0;
12378
Dean Luickf4f30031c2015-10-26 10:28:44 -040012379 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12380 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012381 if (ret) {
12382 dd_dev_err(dd,
12383 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12384 err_info, me->msix.vector, idx, ret);
12385 return ret;
12386 }
12387 /*
12388 * assign arg after request_irq call, so it will be
12389 * cleaned up
12390 */
12391 me->arg = arg;
12392
12393 if (!zalloc_cpumask_var(
12394 &dd->msix_entries[i].mask,
12395 GFP_KERNEL))
12396 goto bail;
12397 if (handler == sdma_interrupt) {
12398 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12399 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012400 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012401 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12402 sdma_cpu = cpumask_next(sdma_cpu, def);
12403 if (sdma_cpu >= nr_cpu_ids)
12404 sdma_cpu = cpumask_first(def);
12405 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012406 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12407 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12408 cpumask_first(def) : rcv_cpu);
12409 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12410 /* map to first default */
12411 cpumask_set_cpu(cpumask_first(def),
12412 dd->msix_entries[i].mask);
12413 } else {
12414 cpumask_set_cpu(rcv_cpu,
12415 dd->msix_entries[i].mask);
12416 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12417 if (rcv_cpu >= nr_cpu_ids)
12418 rcv_cpu = cpumask_first(rcv);
12419 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012420 } else {
12421 /* otherwise first def */
12422 dd_dev_info(dd, "%s cpu %d\n",
12423 err_info, cpumask_first(def));
12424 cpumask_set_cpu(
12425 cpumask_first(def), dd->msix_entries[i].mask);
12426 }
12427 irq_set_affinity_hint(
12428 dd->msix_entries[i].msix.vector,
12429 dd->msix_entries[i].mask);
12430 }
12431
12432out:
12433 free_cpumask_var(def);
12434 free_cpumask_var(rcv);
12435 return ret;
12436bail:
12437 ret = -ENOMEM;
12438 goto out;
12439}
12440
12441/*
12442 * Set the general handler to accept all interrupts, remap all
12443 * chip interrupts back to MSI-X 0.
12444 */
12445static void reset_interrupts(struct hfi1_devdata *dd)
12446{
12447 int i;
12448
12449 /* all interrupts handled by the general handler */
12450 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12451 dd->gi_mask[i] = ~(u64)0;
12452
12453 /* all chip interrupts map to MSI-X 0 */
12454 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12455 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12456}
12457
12458static int set_up_interrupts(struct hfi1_devdata *dd)
12459{
12460 struct hfi1_msix_entry *entries;
12461 u32 total, request;
12462 int i, ret;
12463 int single_interrupt = 0; /* we expect to have all the interrupts */
12464
12465 /*
12466 * Interrupt count:
12467 * 1 general, "slow path" interrupt (includes the SDMA engines
12468 * slow source, SDMACleanupDone)
12469 * N interrupts - one per used SDMA engine
12470 * M interrupts - one per kernel receive context
12471 */
12472 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12473
12474 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12475 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012476 ret = -ENOMEM;
12477 goto fail;
12478 }
12479 /* 1-1 MSI-X entry assignment */
12480 for (i = 0; i < total; i++)
12481 entries[i].msix.entry = i;
12482
12483 /* ask for MSI-X interrupts */
12484 request = total;
12485 request_msix(dd, &request, entries);
12486
12487 if (request == 0) {
12488 /* using INTx */
12489 /* dd->num_msix_entries already zero */
12490 kfree(entries);
12491 single_interrupt = 1;
12492 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12493 } else {
12494 /* using MSI-X */
12495 dd->num_msix_entries = request;
12496 dd->msix_entries = entries;
12497
12498 if (request != total) {
12499 /* using MSI-X, with reduced interrupts */
12500 dd_dev_err(
12501 dd,
12502 "cannot handle reduced interrupt case, want %u, got %u\n",
12503 total, request);
12504 ret = -EINVAL;
12505 goto fail;
12506 }
12507 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12508 }
12509
12510 /* mask all interrupts */
12511 set_intr_state(dd, 0);
12512 /* clear all pending interrupts */
12513 clear_all_interrupts(dd);
12514
12515 /* reset general handler mask, chip MSI-X mappings */
12516 reset_interrupts(dd);
12517
12518 if (single_interrupt)
12519 ret = request_intx_irq(dd);
12520 else
12521 ret = request_msix_irqs(dd);
12522 if (ret)
12523 goto fail;
12524
12525 return 0;
12526
12527fail:
12528 clean_up_interrupts(dd);
12529 return ret;
12530}
12531
12532/*
12533 * Set up context values in dd. Sets:
12534 *
12535 * num_rcv_contexts - number of contexts being used
12536 * n_krcv_queues - number of kernel contexts
12537 * first_user_ctxt - first non-kernel context in array of contexts
12538 * freectxts - number of free user contexts
12539 * num_send_contexts - number of PIO send contexts being used
12540 */
12541static int set_up_context_variables(struct hfi1_devdata *dd)
12542{
12543 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012544 int total_contexts;
12545 int ret;
12546 unsigned ngroups;
12547
12548 /*
12549 * Kernel contexts: (to be fixed later):
12550 * - min of 2 or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012551 * - Context 0 - control context (VL15/multicast/error)
12552 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012553 */
12554 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012555 /*
12556 * Don't count context 0 in n_krcvqs since
12557 * it isn't used for normal verbs traffic.
12558 *
12559 * krcvqs will reflect number of kernel
12560 * receive contexts above 0.
12561 */
12562 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012563 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012564 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012565 num_kernel_contexts =
12566 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12567 /*
12568 * Every kernel receive context needs an ACK send context.
12569 * One send context is allocated for each VL{0-7} and VL15.
12570 */
12571 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12572 dd_dev_err(dd,
12573 "Reducing # kernel rcv contexts to: %d, from %d\n",
12574 (int)(dd->chip_send_contexts - num_vls - 1),
12575 (int)num_kernel_contexts);
12576 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12577 }
12578 /*
12579 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012580 * - default to 1 user context per CPU if num_user_contexts is
12581 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012582 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012583 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012584 num_user_contexts = num_online_cpus();
12585
12586 total_contexts = num_kernel_contexts + num_user_contexts;
12587
12588 /*
12589 * Adjust the counts given a global max.
12590 */
12591 if (total_contexts > dd->chip_rcv_contexts) {
12592 dd_dev_err(dd,
12593 "Reducing # user receive contexts to: %d, from %d\n",
12594 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12595 (int)num_user_contexts);
12596 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12597 /* recalculate */
12598 total_contexts = num_kernel_contexts + num_user_contexts;
12599 }
12600
12601 /* the first N are kernel contexts, the rest are user contexts */
12602 dd->num_rcv_contexts = total_contexts;
12603 dd->n_krcv_queues = num_kernel_contexts;
12604 dd->first_user_ctxt = num_kernel_contexts;
12605 dd->freectxts = num_user_contexts;
12606 dd_dev_info(dd,
12607 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12608 (int)dd->chip_rcv_contexts,
12609 (int)dd->num_rcv_contexts,
12610 (int)dd->n_krcv_queues,
12611 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12612
12613 /*
12614 * Receive array allocation:
12615 * All RcvArray entries are divided into groups of 8. This
12616 * is required by the hardware and will speed up writes to
12617 * consecutive entries by using write-combining of the entire
12618 * cacheline.
12619 *
12620 * The groups are divided evenly among all contexts; any
12621 * leftover groups are given to the first N user
12622 * contexts.
12623 */
12624 dd->rcv_entries.group_size = RCV_INCREMENT;
12625 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12626 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12627 dd->rcv_entries.nctxt_extra = ngroups -
12628 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12629 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12630 dd->rcv_entries.ngroups,
12631 dd->rcv_entries.nctxt_extra);
12632 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12633 MAX_EAGER_ENTRIES * 2) {
12634 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12635 dd->rcv_entries.group_size;
12636 dd_dev_info(dd,
12637 "RcvArray group count too high, change to %u\n",
12638 dd->rcv_entries.ngroups);
12639 dd->rcv_entries.nctxt_extra = 0;
12640 }
12641 /*
12642 * PIO send contexts
12643 */
12644 ret = init_sc_pools_and_sizes(dd);
12645 if (ret >= 0) { /* success */
12646 dd->num_send_contexts = ret;
12647 dd_dev_info(
12648 dd,
12649 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12650 dd->chip_send_contexts,
12651 dd->num_send_contexts,
12652 dd->sc_sizes[SC_KERNEL].count,
12653 dd->sc_sizes[SC_ACK].count,
12654 dd->sc_sizes[SC_USER].count);
12655 ret = 0; /* success */
12656 }
12657
12658 return ret;
12659}
12660
12661/*
12662 * Set the device/port partition key table. The MAD code
12663 * will ensure that, at least, the partial management
12664 * partition key is present in the table.
12665 */
12666static void set_partition_keys(struct hfi1_pportdata *ppd)
12667{
12668 struct hfi1_devdata *dd = ppd->dd;
12669 u64 reg = 0;
12670 int i;
12671
12672 dd_dev_info(dd, "Setting partition keys\n");
12673 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12674 reg |= (ppd->pkeys[i] &
12675 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12676 ((i % 4) *
12677 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12678 /* Each register holds 4 PKey values. */
12679 if ((i % 4) == 3) {
12680 write_csr(dd, RCV_PARTITION_KEY +
12681 ((i - 3) * 2), reg);
12682 reg = 0;
12683 }
12684 }
12685
12686 /* Always enable HW pkeys check when pkeys table is set */
12687 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12688}
12689
12690/*
12691 * These CSRs and memories are uninitialized on reset and must be
12692 * written before reading to set the ECC/parity bits.
12693 *
12694 * NOTE: All user context CSRs that are not mmaped write-only
12695 * (e.g. the TID flows) must be initialized even if the driver never
12696 * reads them.
12697 */
12698static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12699{
12700 int i, j;
12701
12702 /* CceIntMap */
12703 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12704 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12705
12706 /* SendCtxtCreditReturnAddr */
12707 for (i = 0; i < dd->chip_send_contexts; i++)
12708 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12709
12710 /* PIO Send buffers */
12711 /* SDMA Send buffers */
12712 /* These are not normally read, and (presently) have no method
12713 to be read, so are not pre-initialized */
12714
12715 /* RcvHdrAddr */
12716 /* RcvHdrTailAddr */
12717 /* RcvTidFlowTable */
12718 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12719 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12720 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12721 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12722 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12723 }
12724
12725 /* RcvArray */
12726 for (i = 0; i < dd->chip_rcv_array_count; i++)
12727 write_csr(dd, RCV_ARRAY + (8*i),
12728 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12729
12730 /* RcvQPMapTable */
12731 for (i = 0; i < 32; i++)
12732 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12733}
12734
12735/*
12736 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12737 */
12738static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12739 u64 ctrl_bits)
12740{
12741 unsigned long timeout;
12742 u64 reg;
12743
12744 /* is the condition present? */
12745 reg = read_csr(dd, CCE_STATUS);
12746 if ((reg & status_bits) == 0)
12747 return;
12748
12749 /* clear the condition */
12750 write_csr(dd, CCE_CTRL, ctrl_bits);
12751
12752 /* wait for the condition to clear */
12753 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12754 while (1) {
12755 reg = read_csr(dd, CCE_STATUS);
12756 if ((reg & status_bits) == 0)
12757 return;
12758 if (time_after(jiffies, timeout)) {
12759 dd_dev_err(dd,
12760 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12761 status_bits, reg & status_bits);
12762 return;
12763 }
12764 udelay(1);
12765 }
12766}
12767
12768/* set CCE CSRs to chip reset defaults */
12769static void reset_cce_csrs(struct hfi1_devdata *dd)
12770{
12771 int i;
12772
12773 /* CCE_REVISION read-only */
12774 /* CCE_REVISION2 read-only */
12775 /* CCE_CTRL - bits clear automatically */
12776 /* CCE_STATUS read-only, use CceCtrl to clear */
12777 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12778 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12779 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12780 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12781 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12782 /* CCE_ERR_STATUS read-only */
12783 write_csr(dd, CCE_ERR_MASK, 0);
12784 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12785 /* CCE_ERR_FORCE leave alone */
12786 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12787 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12788 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12789 /* CCE_PCIE_CTRL leave alone */
12790 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12791 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12792 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12793 CCE_MSIX_TABLE_UPPER_RESETCSR);
12794 }
12795 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12796 /* CCE_MSIX_PBA read-only */
12797 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12798 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12799 }
12800 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12801 write_csr(dd, CCE_INT_MAP, 0);
12802 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12803 /* CCE_INT_STATUS read-only */
12804 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12805 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12806 /* CCE_INT_FORCE leave alone */
12807 /* CCE_INT_BLOCKED read-only */
12808 }
12809 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12810 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12811}
12812
12813/* set ASIC CSRs to chip reset defaults */
12814static void reset_asic_csrs(struct hfi1_devdata *dd)
12815{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012816 int i;
12817
12818 /*
12819 * If the HFIs are shared between separate nodes or VMs,
12820 * then more will need to be done here. One idea is a module
12821 * parameter that returns early, letting the first power-on or
12822 * a known first load do the reset and blocking all others.
12823 */
12824
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012825 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12826 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012827
12828 if (dd->icode != ICODE_FPGA_EMULATION) {
12829 /* emulation does not have an SBus - leave these alone */
12830 /*
12831 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12832 * Notes:
12833 * o The reset is not zero if aimed at the core. See the
12834 * SBus documentation for details.
12835 * o If the SBus firmware has been updated (e.g. by the BIOS),
12836 * will the reset revert that?
12837 */
12838 /* ASIC_CFG_SBUS_REQUEST leave alone */
12839 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12840 }
12841 /* ASIC_SBUS_RESULT read-only */
12842 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12843 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12844 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12845 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012846
12847 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012848 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012849
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012850 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012851 /* ASIC_STS_THERM read-only */
12852 /* ASIC_CFG_RESET leave alone */
12853
12854 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12855 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12856 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12857 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12858 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12859 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12860 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12861 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12862 for (i = 0; i < 16; i++)
12863 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12864
12865 /* ASIC_GPIO_IN read-only */
12866 write_csr(dd, ASIC_GPIO_OE, 0);
12867 write_csr(dd, ASIC_GPIO_INVERT, 0);
12868 write_csr(dd, ASIC_GPIO_OUT, 0);
12869 write_csr(dd, ASIC_GPIO_MASK, 0);
12870 /* ASIC_GPIO_STATUS read-only */
12871 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12872 /* ASIC_GPIO_FORCE leave alone */
12873
12874 /* ASIC_QSFP1_IN read-only */
12875 write_csr(dd, ASIC_QSFP1_OE, 0);
12876 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12877 write_csr(dd, ASIC_QSFP1_OUT, 0);
12878 write_csr(dd, ASIC_QSFP1_MASK, 0);
12879 /* ASIC_QSFP1_STATUS read-only */
12880 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12881 /* ASIC_QSFP1_FORCE leave alone */
12882
12883 /* ASIC_QSFP2_IN read-only */
12884 write_csr(dd, ASIC_QSFP2_OE, 0);
12885 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12886 write_csr(dd, ASIC_QSFP2_OUT, 0);
12887 write_csr(dd, ASIC_QSFP2_MASK, 0);
12888 /* ASIC_QSFP2_STATUS read-only */
12889 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12890 /* ASIC_QSFP2_FORCE leave alone */
12891
12892 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12893 /* this also writes a NOP command, clearing paging mode */
12894 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12895 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012896}
12897
12898/* set MISC CSRs to chip reset defaults */
12899static void reset_misc_csrs(struct hfi1_devdata *dd)
12900{
12901 int i;
12902
12903 for (i = 0; i < 32; i++) {
12904 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12905 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12906 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12907 }
12908 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12909 only be written in 128-byte chunks */
12910 /* init RSA engine to clear lingering errors */
12911 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12912 write_csr(dd, MISC_CFG_RSA_MU, 0);
12913 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12914 /* MISC_STS_8051_DIGEST read-only */
12915 /* MISC_STS_SBM_DIGEST read-only */
12916 /* MISC_STS_PCIE_DIGEST read-only */
12917 /* MISC_STS_FAB_DIGEST read-only */
12918 /* MISC_ERR_STATUS read-only */
12919 write_csr(dd, MISC_ERR_MASK, 0);
12920 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12921 /* MISC_ERR_FORCE leave alone */
12922}
12923
12924/* set TXE CSRs to chip reset defaults */
12925static void reset_txe_csrs(struct hfi1_devdata *dd)
12926{
12927 int i;
12928
12929 /*
12930 * TXE Kernel CSRs
12931 */
12932 write_csr(dd, SEND_CTRL, 0);
12933 __cm_reset(dd, 0); /* reset CM internal state */
12934 /* SEND_CONTEXTS read-only */
12935 /* SEND_DMA_ENGINES read-only */
12936 /* SEND_PIO_MEM_SIZE read-only */
12937 /* SEND_DMA_MEM_SIZE read-only */
12938 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12939 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12940 /* SEND_PIO_ERR_STATUS read-only */
12941 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12942 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12943 /* SEND_PIO_ERR_FORCE leave alone */
12944 /* SEND_DMA_ERR_STATUS read-only */
12945 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12946 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12947 /* SEND_DMA_ERR_FORCE leave alone */
12948 /* SEND_EGRESS_ERR_STATUS read-only */
12949 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12950 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12951 /* SEND_EGRESS_ERR_FORCE leave alone */
12952 write_csr(dd, SEND_BTH_QP, 0);
12953 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12954 write_csr(dd, SEND_SC2VLT0, 0);
12955 write_csr(dd, SEND_SC2VLT1, 0);
12956 write_csr(dd, SEND_SC2VLT2, 0);
12957 write_csr(dd, SEND_SC2VLT3, 0);
12958 write_csr(dd, SEND_LEN_CHECK0, 0);
12959 write_csr(dd, SEND_LEN_CHECK1, 0);
12960 /* SEND_ERR_STATUS read-only */
12961 write_csr(dd, SEND_ERR_MASK, 0);
12962 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
12963 /* SEND_ERR_FORCE read-only */
12964 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
12965 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
12966 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
12967 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
12968 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
12969 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
12970 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
12971 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
12972 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
12973 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
12974 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
12975 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
12976 SEND_CM_GLOBAL_CREDIT_RESETCSR);
12977 /* SEND_CM_CREDIT_USED_STATUS read-only */
12978 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
12979 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
12980 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
12981 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
12982 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
12983 for (i = 0; i < TXE_NUM_DATA_VL; i++)
12984 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
12985 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
12986 /* SEND_CM_CREDIT_USED_VL read-only */
12987 /* SEND_CM_CREDIT_USED_VL15 read-only */
12988 /* SEND_EGRESS_CTXT_STATUS read-only */
12989 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
12990 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
12991 /* SEND_EGRESS_ERR_INFO read-only */
12992 /* SEND_EGRESS_ERR_SOURCE read-only */
12993
12994 /*
12995 * TXE Per-Context CSRs
12996 */
12997 for (i = 0; i < dd->chip_send_contexts; i++) {
12998 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
12999 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13000 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13001 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13002 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13003 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13004 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13005 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13006 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13007 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13008 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13009 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13010 }
13011
13012 /*
13013 * TXE Per-SDMA CSRs
13014 */
13015 for (i = 0; i < dd->chip_sdma_engines; i++) {
13016 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13017 /* SEND_DMA_STATUS read-only */
13018 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13019 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13020 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13021 /* SEND_DMA_HEAD read-only */
13022 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13023 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13024 /* SEND_DMA_IDLE_CNT read-only */
13025 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13026 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13027 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13028 /* SEND_DMA_ENG_ERR_STATUS read-only */
13029 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13030 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13031 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13032 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13033 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13034 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13035 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13036 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13037 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13038 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13039 }
13040}
13041
13042/*
13043 * Expect on entry:
13044 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13045 */
13046static void init_rbufs(struct hfi1_devdata *dd)
13047{
13048 u64 reg;
13049 int count;
13050
13051 /*
13052 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13053 * clear.
13054 */
13055 count = 0;
13056 while (1) {
13057 reg = read_csr(dd, RCV_STATUS);
13058 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13059 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13060 break;
13061 /*
13062 * Give up after 1ms - maximum wait time.
13063 *
13064 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13065 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13066 * 148 KB / (66% * 250MB/s) = 920us
13067 */
13068 if (count++ > 500) {
13069 dd_dev_err(dd,
13070 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13071 __func__, reg);
13072 break;
13073 }
13074 udelay(2); /* do not busy-wait the CSR */
13075 }
13076
13077 /* start the init - expect RcvCtrl to be 0 */
13078 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13079
13080 /*
13081 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13082 * period after the write before RcvStatus.RxRbufInitDone is valid.
13083 * The delay in the first run through the loop below is sufficient and
13084 * required before the first read of RcvStatus.RxRbufInitDone.
13085 */
13086 read_csr(dd, RCV_CTRL);
13087
13088 /* wait for the init to finish */
13089 count = 0;
13090 while (1) {
13091 /* delay is required first time through - see above */
13092 udelay(2); /* do not busy-wait the CSR */
13093 reg = read_csr(dd, RCV_STATUS);
13094 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13095 break;
13096
13097 /* give up after 100us - slowest possible at 33MHz is 73us */
13098 if (count++ > 50) {
13099 dd_dev_err(dd,
13100 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13101 __func__);
13102 break;
13103 }
13104 }
13105}
13106
13107/* set RXE CSRs to chip reset defaults */
13108static void reset_rxe_csrs(struct hfi1_devdata *dd)
13109{
13110 int i, j;
13111
13112 /*
13113 * RXE Kernel CSRs
13114 */
13115 write_csr(dd, RCV_CTRL, 0);
13116 init_rbufs(dd);
13117 /* RCV_STATUS read-only */
13118 /* RCV_CONTEXTS read-only */
13119 /* RCV_ARRAY_CNT read-only */
13120 /* RCV_BUF_SIZE read-only */
13121 write_csr(dd, RCV_BTH_QP, 0);
13122 write_csr(dd, RCV_MULTICAST, 0);
13123 write_csr(dd, RCV_BYPASS, 0);
13124 write_csr(dd, RCV_VL15, 0);
13125 /* this is a clear-down */
13126 write_csr(dd, RCV_ERR_INFO,
13127 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13128 /* RCV_ERR_STATUS read-only */
13129 write_csr(dd, RCV_ERR_MASK, 0);
13130 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13131 /* RCV_ERR_FORCE leave alone */
13132 for (i = 0; i < 32; i++)
13133 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13134 for (i = 0; i < 4; i++)
13135 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13136 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13137 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13138 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13139 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13140 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13141 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13142 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13143 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13144 }
13145 for (i = 0; i < 32; i++)
13146 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13147
13148 /*
13149 * RXE Kernel and User Per-Context CSRs
13150 */
13151 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13152 /* kernel */
13153 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13154 /* RCV_CTXT_STATUS read-only */
13155 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13156 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13157 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13158 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13159 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13160 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13161 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13162 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13163 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13164 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13165
13166 /* user */
13167 /* RCV_HDR_TAIL read-only */
13168 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13169 /* RCV_EGR_INDEX_TAIL read-only */
13170 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13171 /* RCV_EGR_OFFSET_TAIL read-only */
13172 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13173 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13174 0);
13175 }
13176 }
13177}
13178
13179/*
13180 * Set sc2vl tables.
13181 *
13182 * They power on to zeros, so to avoid send context errors
13183 * they need to be set:
13184 *
13185 * SC 0-7 -> VL 0-7 (respectively)
13186 * SC 15 -> VL 15
13187 * otherwise
13188 * -> VL 0
13189 */
13190static void init_sc2vl_tables(struct hfi1_devdata *dd)
13191{
13192 int i;
13193 /* init per architecture spec, constrained by hardware capability */
13194
13195 /* HFI maps sent packets */
13196 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13197 0,
13198 0, 0, 1, 1,
13199 2, 2, 3, 3,
13200 4, 4, 5, 5,
13201 6, 6, 7, 7));
13202 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13203 1,
13204 8, 0, 9, 0,
13205 10, 0, 11, 0,
13206 12, 0, 13, 0,
13207 14, 0, 15, 15));
13208 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13209 2,
13210 16, 0, 17, 0,
13211 18, 0, 19, 0,
13212 20, 0, 21, 0,
13213 22, 0, 23, 0));
13214 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13215 3,
13216 24, 0, 25, 0,
13217 26, 0, 27, 0,
13218 28, 0, 29, 0,
13219 30, 0, 31, 0));
13220
13221 /* DC maps received packets */
13222 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13223 15_0,
13224 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13225 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13226 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13227 31_16,
13228 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13229 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13230
13231 /* initialize the cached sc2vl values consistently with h/w */
13232 for (i = 0; i < 32; i++) {
13233 if (i < 8 || i == 15)
13234 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13235 else
13236 *((u8 *)(dd->sc2vl) + i) = 0;
13237 }
13238}
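
/*
 * Illustrative sketch (not part of the driver): a stand-alone model of
 * the default SC-to-VL mapping described above, handy for checking the
 * cached dd->sc2vl values against the table entries written to the
 * chip.  The helper names are hypothetical.
 */
#if 0
#include <stdint.h>

static uint8_t default_sc_to_vl(unsigned sc)
{
	if (sc < 8)
		return sc;	/* SC 0-7 -> VL 0-7 */
	if (sc == 15)
		return 15;	/* SC 15 -> VL 15 */
	return 0;		/* everything else -> VL 0 */
}

static void fill_sc2vl_cache(uint8_t cache[32])
{
	unsigned sc;

	for (sc = 0; sc < 32; sc++)
		cache[sc] = default_sc_to_vl(sc);
}
#endif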
13239
13240/*
13241 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13242 * depend on the chip going through a power-on reset - a driver may be loaded
13243 * and unloaded many times.
13244 *
13245 * Do not write any CSR values to the chip in this routine - there may be
13246 * a reset following the (possible) FLR in this routine.
13247 *
13248 */
13249static void init_chip(struct hfi1_devdata *dd)
13250{
13251 int i;
13252
13253 /*
13254 * Put the HFI CSRs in a known state.
13255 * Combine this with a DC reset.
13256 *
13257 * Stop the device from doing anything while we do a
13258 * reset. We know there are no other active users of
13259 * the device since we are now in charge. Turn off
13260 * off all outbound and inbound traffic and make sure
13261 * the device does not generate any interrupts.
13262 */
13263
13264 /* disable send contexts and SDMA engines */
13265 write_csr(dd, SEND_CTRL, 0);
13266 for (i = 0; i < dd->chip_send_contexts; i++)
13267 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13268 for (i = 0; i < dd->chip_sdma_engines; i++)
13269 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13270 /* disable port (turn off RXE inbound traffic) and contexts */
13271 write_csr(dd, RCV_CTRL, 0);
13272 for (i = 0; i < dd->chip_rcv_contexts; i++)
13273 write_csr(dd, RCV_CTXT_CTRL, 0);
13274 /* mask all interrupt sources */
13275 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13276 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13277
13278 /*
13279 * DC Reset: do a full DC reset before the register clear.
13280 * A recommended length of time to hold is one CSR read,
13281 * so reread the CceDcCtrl. Then, hold the DC in reset
13282 * across the clear.
13283 */
13284 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13285 (void) read_csr(dd, CCE_DC_CTRL);
13286
13287 if (use_flr) {
13288 /*
13289 * A FLR will reset the SPC core and part of the PCIe.
13290 * The parts that need to be restored have already been
13291 * saved.
13292 */
13293 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13294
13295 /* do the FLR, the DC reset will remain */
13296 hfi1_pcie_flr(dd);
13297
13298 /* restore command and BARs */
13299 restore_pci_variables(dd);
13300
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013301 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013302 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13303 hfi1_pcie_flr(dd);
13304 restore_pci_variables(dd);
13305 }
13306
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013307 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013308 } else {
13309 dd_dev_info(dd, "Resetting CSRs with writes\n");
13310 reset_cce_csrs(dd);
13311 reset_txe_csrs(dd);
13312 reset_rxe_csrs(dd);
13313 reset_asic_csrs(dd);
13314 reset_misc_csrs(dd);
13315 }
13316 /* clear the DC reset */
13317 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013318
Mike Marciniszyn77241052015-07-30 15:17:43 -040013319 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013320 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013321 setextled(dd, 0);
13322 /*
13323 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013324 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013325 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013326 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013327 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013328 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013329 * I2CCLK and I2CDAT will change per direction, and INT_N and
13330 * MODPRS_N are input only and their value is ignored.
13331 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013332 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13333 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013334}
13335
13336static void init_early_variables(struct hfi1_devdata *dd)
13337{
13338 int i;
13339
13340 /* assign link credit variables */
13341 dd->vau = CM_VAU;
13342 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013343 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013344 dd->link_credits--;
13345 dd->vcu = cu_to_vcu(hfi1_cu);
13346 /* enough room for 8 MAD packets plus header - 17K */
13347 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13348 if (dd->vl15_init > dd->link_credits)
13349 dd->vl15_init = dd->link_credits;
13350
13351 write_uninitialized_csrs_and_memories(dd);
13352
13353 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13354 for (i = 0; i < dd->num_pports; i++) {
13355 struct hfi1_pportdata *ppd = &dd->pport[i];
13356
13357 set_partition_keys(ppd);
13358 }
13359 init_sc2vl_tables(dd);
13360}
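
/*
 * Illustrative sketch (not part of the driver): the VL15 credit
 * arithmetic used above.  8 MAD packets of 2048 bytes, each with a
 * 128-byte header, is 8 * (2048 + 128) = 17408 bytes (~17K); dividing
 * by the allocation-unit size and capping at the global credit count
 * gives vl15_init.  The AU size is passed in here rather than derived
 * from vau, so no encoding is assumed; the helper name is hypothetical.
 */
#if 0
#include <stdint.h>

static uint32_t vl15_init_credits(uint32_t au_bytes, uint32_t link_credits)
{
	uint32_t credits = (8 * (2048 + 128)) / au_bytes;

	return credits > link_credits ? link_credits : credits;
}
#endif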
13361
13362static void init_kdeth_qp(struct hfi1_devdata *dd)
13363{
13364 /* user changed the KDETH_QP */
13365 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13366 /* out of range or illegal value */
13367 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13368 kdeth_qp = 0;
13369 }
13370 if (kdeth_qp == 0) /* not set, or failed range check */
13371 kdeth_qp = DEFAULT_KDETH_QP;
13372
13373 write_csr(dd, SEND_BTH_QP,
13374 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13375 << SEND_BTH_QP_KDETH_QP_SHIFT);
13376
13377 write_csr(dd, RCV_BTH_QP,
13378 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13379 << RCV_BTH_QP_KDETH_QP_SHIFT);
13380}
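
/*
 * Illustrative sketch (not part of the driver): the module parameter is
 * treated as an 8-bit KDETH QP prefix.  0 means "not set" and anything
 * >= 0xff is rejected; both cases fall back to the default.  The same
 * normalized prefix is then written to both the send and receive BTH QP
 * CSRs above.  The helper name is hypothetical.
 */
#if 0
static unsigned int normalize_kdeth_qp(unsigned int requested,
					unsigned int dflt)
{
	if (requested != 0 && requested >= 0xff)
		requested = 0;		/* out of range or illegal value */
	return requested ? requested : dflt;
}
#endif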
13381
13382/**
13383 * init_qpmap_table
13384 * @dd - device data
13385 * @first_ctxt - first context
13386 * @last_ctxt - last context
13387 *
13388 * This routine sets the qpn mapping table that
13389 * is indexed by qpn[8:1].
13390 *
13391 * The routine will round robin the 256 settings
13392 * from first_ctxt to last_ctxt.
13393 *
13394 * The first/last looks ahead to having specialized
13395 * receive contexts for mgmt and bypass. Normal
13396 * verbs traffic is assumed to be on a range
13397 * of receive contexts.
13398 */
13399static void init_qpmap_table(struct hfi1_devdata *dd,
13400 u32 first_ctxt,
13401 u32 last_ctxt)
13402{
13403 u64 reg = 0;
13404 u64 regno = RCV_QP_MAP_TABLE;
13405 int i;
13406 u64 ctxt = first_ctxt;
13407
13408 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013409 reg |= ctxt << (8 * (i % 8));
13410 i++;
13411 ctxt++;
13412 if (ctxt > last_ctxt)
13413 ctxt = first_ctxt;
13414 if (i % 8 == 0) {
13415 write_csr(dd, regno, reg);
13416 reg = 0;
13417 regno += 8;
13418 }
13419 }
13420 if (i % 8)
13421 write_csr(dd, regno, reg);
13422
13423 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13424 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13425}
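
/*
 * Illustrative sketch (not part of the driver): a stand-alone model of
 * the QP map packing above.  256 one-byte entries are filled
 * round-robin from first to last context and packed eight per 64-bit
 * map register, giving 32 register values.  The helper name is
 * hypothetical.
 */
#if 0
#include <string.h>
#include <stdint.h>

static void model_qpmap(uint64_t map[32], unsigned first, unsigned last)
{
	unsigned i;
	uint64_t ctxt = first;

	memset(map, 0, 32 * sizeof(uint64_t));
	for (i = 0; i < 256; i++) {
		map[i / 8] |= ctxt << (8 * (i % 8));
		if (++ctxt > last)
			ctxt = first;
	}
}
#endif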
13426
13427/**
13428 * init_qos - init RX qos
13429 * @dd - device data
13430 * @first_ctxt - first context
13431 *
13432 * This routine initializes Rule 0 and the
13433 * RSM map table to implement qos.
13434 *
13435 * If all of the limit tests succeed,
13436 * qos is applied based on the array
13437 * interpretation of krcvqs where
13438 * entry 0 is VL0.
13439 *
13440 * The number of vl bits (n) and the number of qpn
13441 * bits (m) are computed to feed both the RSM map table
13442 * and the single rule.
13443 *
13444 */
13445static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13446{
13447 u8 max_by_vl = 0;
13448 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13449 u64 *rsmmap;
13450 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013451 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013452
13453 /* validate */
13454 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13455 num_vls == 1 ||
13456 krcvqsset <= 1)
13457 goto bail;
13458 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13459 if (krcvqs[i] > max_by_vl)
13460 max_by_vl = krcvqs[i];
13461 if (max_by_vl > 32)
13462 goto bail;
13463 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13464 /* determine bits for vl */
13465 n = ilog2(num_vls);
13466 /* determine bits for qpn */
13467 m = ilog2(qpns_per_vl);
13468 if ((m + n) > 7)
13469 goto bail;
13470 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13471 goto bail;
13472 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013473 if (!rsmmap)
13474 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013475 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13476 /* init the local copy of the table */
13477 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13478 unsigned tctxt;
13479
13480 for (qpn = 0, tctxt = ctxt;
13481 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13482 unsigned idx, regoff, regidx;
13483
13484 /* generate index <= 128 */
13485 idx = (qpn << n) ^ i;
13486 regoff = (idx % 8) * 8;
13487 regidx = idx / 8;
13488 reg = rsmmap[regidx];
13489 /* replace 0xff with context number */
13490 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13491 << regoff);
13492 reg |= (u64)(tctxt++) << regoff;
13493 rsmmap[regidx] = reg;
13494 if (tctxt == ctxt + krcvqs[i])
13495 tctxt = ctxt;
13496 }
13497 ctxt += krcvqs[i];
13498 }
13499 /* flush cached copies to chip */
13500 for (i = 0; i < NUM_MAP_REGS; i++)
13501 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13502 /* add rule0 */
13503 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13504 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13505 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13506 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13507 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13508 LRH_BTH_MATCH_OFFSET
13509 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13510 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13511 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13512 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13513 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13514 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13515 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13516 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13517 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13518 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13519 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13520 /* Enable RSM */
13521 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13522 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013523 /* map everything else to first context */
13524 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013525 dd->qos_shift = n + 1;
13526 return;
13527bail:
13528 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013529 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013530}
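
/*
 * Illustrative sketch (not part of the driver): how the VL-bit count
 * (n) and QPN-bit count (m) fall out of krcvqs, as described in the
 * comment above.  n + m must fit in 7 bits or QoS is not applied.  The
 * ilog2/roundup helpers are open-coded so the sketch stands alone, and
 * all names are hypothetical.
 */
#if 0
#include <stdint.h>

static unsigned floor_log2(uint32_t v)		/* v > 0 */
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint32_t roundup_pow2(uint32_t v)
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

/* Returns 0 and fills *n (VL bits) and *m (QPN bits), or -1 to bail. */
static int qos_rsm_bits(unsigned num_vls, const uint32_t *krcvqs,
			unsigned krcvqs_count, unsigned *n, unsigned *m)
{
	uint32_t max_by_vl = 0;
	unsigned i;

	for (i = 0; i < krcvqs_count && i < num_vls; i++)
		if (krcvqs[i] > max_by_vl)
			max_by_vl = krcvqs[i];
	if (max_by_vl == 0 || max_by_vl > 32)
		return -1;
	*n = floor_log2(num_vls);
	*m = floor_log2(roundup_pow2(max_by_vl));
	return (*m + *n) > 7 ? -1 : 0;
}
#endif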
13531
13532static void init_rxe(struct hfi1_devdata *dd)
13533{
13534 /* enable all receive errors */
13535 write_csr(dd, RCV_ERR_MASK, ~0ull);
13536 /* setup QPN map table - start where VL15 context leaves off */
13537 init_qos(
13538 dd,
13539 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13540 /*
13541 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13542 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13543 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13544 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13545 * Max_Payload_Size set to its minimum of 128.
13546 *
13547 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13548 * (64 bytes). Max_Payload_Size is possibly modified upward in
13549 * tune_pcie_caps() which is called after this routine.
13550 */
13551}
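
/*
 * Illustrative sketch (not part of the driver): the configuration
 * constraint described above, expressed as a check.  The only invalid
 * combination is a write-combining buffer larger than the PCIe
 * Max_Payload_Size (e.g. RcvWcb at 256 bytes with Max_Payload_Size at
 * its 128-byte minimum).  The helper name is hypothetical.
 */
#if 0
#include <stdbool.h>

static bool rcv_wcb_config_valid(unsigned rcv_wcb_bytes,
				 unsigned max_payload_bytes)
{
	return rcv_wcb_bytes <= max_payload_bytes;
}
#endif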
13552
13553static void init_other(struct hfi1_devdata *dd)
13554{
13555 /* enable all CCE errors */
13556 write_csr(dd, CCE_ERR_MASK, ~0ull);
13557 /* enable *some* Misc errors */
13558 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13559 /* enable all DC errors, except LCB */
13560 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13561 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13562}
13563
13564/*
13565 * Fill out the given AU table using the given CU. A CU is defined in terms
13566 * of AUs. The table is an encoding: given the index, how many AUs does that
13567 * represent?
13568 *
13569 * NOTE: Assumes that the register layout is the same for the
13570 * local and remote tables.
13571 */
13572static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13573 u32 csr0to3, u32 csr4to7)
13574{
13575 write_csr(dd, csr0to3,
13576 0ull <<
13577 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13578 | 1ull <<
13579 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13580 | 2ull * cu <<
13581 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13582 | 4ull * cu <<
13583 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13584 write_csr(dd, csr4to7,
13585 8ull * cu <<
13586 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13587 | 16ull * cu <<
13588 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13589 | 32ull * cu <<
13590 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13591 | 64ull * cu <<
13592 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13593
13594}
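
/*
 * Illustrative sketch (not part of the driver): the AU table encoding
 * written above, as a plain array.  Entry 0 is 0 AUs, entry 1 is 1 AU,
 * and entries 2-7 are 2, 4, 8, 16, 32 and 64 times the CU value.  The
 * helper name is hypothetical.
 */
#if 0
#include <stdint.h>

static void model_au_table(uint64_t table[8], uint32_t cu)
{
	unsigned i;

	table[0] = 0;
	table[1] = 1;
	for (i = 2; i < 8; i++)
		table[i] = (1ull << (i - 1)) * cu;
}
#endif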
13595
13596static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13597{
13598 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13599 SEND_CM_LOCAL_AU_TABLE4_TO7);
13600}
13601
13602void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13603{
13604 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13605 SEND_CM_REMOTE_AU_TABLE4_TO7);
13606}
13607
13608static void init_txe(struct hfi1_devdata *dd)
13609{
13610 int i;
13611
13612 /* enable all PIO, SDMA, general, and Egress errors */
13613 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13614 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13615 write_csr(dd, SEND_ERR_MASK, ~0ull);
13616 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13617
13618 /* enable all per-context and per-SDMA engine errors */
13619 for (i = 0; i < dd->chip_send_contexts; i++)
13620 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13621 for (i = 0; i < dd->chip_sdma_engines; i++)
13622 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13623
13624 /* set the local CU to AU mapping */
13625 assign_local_cm_au_table(dd, dd->vcu);
13626
13627 /*
13628 * Set reasonable default for Credit Return Timer
13629 * Don't set on Simulator - causes it to choke.
13630 */
13631 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13632 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13633}
13634
13635int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13636{
13637 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13638 unsigned sctxt;
13639 int ret = 0;
13640 u64 reg;
13641
13642 if (!rcd || !rcd->sc) {
13643 ret = -EINVAL;
13644 goto done;
13645 }
13646 sctxt = rcd->sc->hw_context;
13647 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13648 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13649 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13650 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13651 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13652 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13653 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13654 /*
13655 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013656 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013657 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013658 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13659 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13660 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13661 }
13662
13663 /* Enable J_KEY check on receive context. */
13664 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13665 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13666 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13667 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13668done:
13669 return ret;
13670}
13671
13672int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13673{
13674 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13675 unsigned sctxt;
13676 int ret = 0;
13677 u64 reg;
13678
13679 if (!rcd || !rcd->sc) {
13680 ret = -EINVAL;
13681 goto done;
13682 }
13683 sctxt = rcd->sc->hw_context;
13684 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13685 /*
13686 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13687 * This check would not have been enabled for A0 h/w, see
13688 * set_ctxt_jkey().
13689 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013690 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013691 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13692 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13693 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13694 }
13695 /* Turn off the J_KEY on the receive side */
13696 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13697done:
13698 return ret;
13699}
13700
13701int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13702{
13703 struct hfi1_ctxtdata *rcd;
13704 unsigned sctxt;
13705 int ret = 0;
13706 u64 reg;
13707
13708 if (ctxt < dd->num_rcv_contexts)
13709 rcd = dd->rcd[ctxt];
13710 else {
13711 ret = -EINVAL;
13712 goto done;
13713 }
13714 if (!rcd || !rcd->sc) {
13715 ret = -EINVAL;
13716 goto done;
13717 }
13718 sctxt = rcd->sc->hw_context;
13719 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13720 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13721 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13722 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13723 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13724 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13725done:
13726 return ret;
13727}
13728
13729int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13730{
13731 struct hfi1_ctxtdata *rcd;
13732 unsigned sctxt;
13733 int ret = 0;
13734 u64 reg;
13735
13736 if (ctxt < dd->num_rcv_contexts)
13737 rcd = dd->rcd[ctxt];
13738 else {
13739 ret = -EINVAL;
13740 goto done;
13741 }
13742 if (!rcd || !rcd->sc) {
13743 ret = -EINVAL;
13744 goto done;
13745 }
13746 sctxt = rcd->sc->hw_context;
13747 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13748 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13749 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13750 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13751done:
13752 return ret;
13753}
13754
13755/*
13756 * Start doing the clean up of the chip. Our clean up happens in multiple
13757 * stages and this is just the first.
13758 */
13759void hfi1_start_cleanup(struct hfi1_devdata *dd)
13760{
13761 free_cntrs(dd);
13762 free_rcverr(dd);
13763 clean_up_interrupts(dd);
13764}
13765
13766#define HFI_BASE_GUID(dev) \
13767 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13768
13769/*
13770 * Certain chip functions need to be initialized only once per asic
13771 * instead of per-device. This function finds the peer device and
13772 * checks whether that chip initialization needs to be done by this
13773 * device.
13774 */
13775static void asic_should_init(struct hfi1_devdata *dd)
13776{
13777 unsigned long flags;
13778 struct hfi1_devdata *tmp, *peer = NULL;
13779
13780 spin_lock_irqsave(&hfi1_devs_lock, flags);
13781 /* Find our peer device */
13782 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13783 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13784 dd->unit != tmp->unit) {
13785 peer = tmp;
13786 break;
13787 }
13788 }
13789
13790 /*
13791 * "Claim" the ASIC for initialization if it hasn't been
13792 * "claimed" yet.
13793 */
13794 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13795 dd->flags |= HFI1_DO_INIT_ASIC;
13796 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13797}
13798
Dean Luick5d9157a2015-11-16 21:59:34 -050013799/*
13800 * Set dd->boardname. Use a generic name if a name is not returned from
13801 * EFI variable space.
13802 *
13803 * Return 0 on success, -ENOMEM if space could not be allocated.
13804 */
13805static int obtain_boardname(struct hfi1_devdata *dd)
13806{
13807 /* generic board description */
13808 const char generic[] =
13809 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13810 unsigned long size;
13811 int ret;
13812
13813 ret = read_hfi1_efi_var(dd, "description", &size,
13814 (void **)&dd->boardname);
13815 if (ret) {
13816 dd_dev_err(dd, "Board description not found\n");
13817 /* use generic description */
13818 dd->boardname = kstrdup(generic, GFP_KERNEL);
13819 if (!dd->boardname)
13820 return -ENOMEM;
13821 }
13822 return 0;
13823}
13824
Mike Marciniszyn77241052015-07-30 15:17:43 -040013825/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013826 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013827 * @pdev: the pci_dev for hfi1_ib device
13828 * @ent: pci_device_id struct for this dev
13829 *
13830 * Also allocates, initializes, and returns the devdata struct for this
13831 * device instance
13832 *
13833 * This is global, and is called directly at init to set up the
13834 * chip-specific function pointers for later use.
13835 */
13836struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13837 const struct pci_device_id *ent)
13838{
13839 struct hfi1_devdata *dd;
13840 struct hfi1_pportdata *ppd;
13841 u64 reg;
13842 int i, ret;
13843 static const char * const inames[] = { /* implementation names */
13844 "RTL silicon",
13845 "RTL VCS simulation",
13846 "RTL FPGA emulation",
13847 "Functional simulator"
13848 };
13849
13850 dd = hfi1_alloc_devdata(pdev,
13851 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13852 if (IS_ERR(dd))
13853 goto bail;
13854 ppd = dd->pport;
13855 for (i = 0; i < dd->num_pports; i++, ppd++) {
13856 int vl;
13857 /* init common fields */
13858 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13859 /* DC supports 4 link widths */
13860 ppd->link_width_supported =
13861 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13862 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13863 ppd->link_width_downgrade_supported =
13864 ppd->link_width_supported;
13865 /* start out enabling only 4X */
13866 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13867 ppd->link_width_downgrade_enabled =
13868 ppd->link_width_downgrade_supported;
13869 /* link width active is 0 when link is down */
13870 /* link width downgrade active is 0 when link is down */
13871
13872 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13873 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13874 hfi1_early_err(&pdev->dev,
13875 "Invalid num_vls %u, using %u VLs\n",
13876 num_vls, HFI1_MAX_VLS_SUPPORTED);
13877 num_vls = HFI1_MAX_VLS_SUPPORTED;
13878 }
13879 ppd->vls_supported = num_vls;
13880 ppd->vls_operational = ppd->vls_supported;
13881 /* Set the default MTU. */
13882 for (vl = 0; vl < num_vls; vl++)
13883 dd->vld[vl].mtu = hfi1_max_mtu;
13884 dd->vld[15].mtu = MAX_MAD_PACKET;
13885 /*
13886 * Set the initial values to reasonable defaults; they will be set
13887 * for real when link is up.
13888 */
13889 ppd->lstate = IB_PORT_DOWN;
13890 ppd->overrun_threshold = 0x4;
13891 ppd->phy_error_threshold = 0xf;
13892 ppd->port_crc_mode_enabled = link_crc_mask;
13893 /* initialize supported LTP CRC mode */
13894 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13895 /* initialize enabled LTP CRC mode */
13896 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13897 /* start in offline */
13898 ppd->host_link_state = HLS_DN_OFFLINE;
13899 init_vl_arb_caches(ppd);
13900 }
13901
13902 dd->link_default = HLS_DN_POLL;
13903
13904 /*
13905 * Do remaining PCIe setup and save PCIe values in dd.
13906 * Any error printing is already done by the init code.
13907 * On return, we have the chip mapped.
13908 */
13909 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13910 if (ret < 0)
13911 goto bail_free;
13912
13913 /* verify that reads actually work, save revision for reset check */
13914 dd->revision = read_csr(dd, CCE_REVISION);
13915 if (dd->revision == ~(u64)0) {
13916 dd_dev_err(dd, "cannot read chip CSRs\n");
13917 ret = -EINVAL;
13918 goto bail_cleanup;
13919 }
13920 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13921 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13922 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13923 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13924
13925 /* obtain the hardware ID - NOT related to unit, which is a
13926  * software enumeration */
13927 reg = read_csr(dd, CCE_REVISION2);
13928 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13929 & CCE_REVISION2_HFI_ID_MASK;
13930 /* the variable size will remove unwanted bits */
13931 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13932 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13933 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13934 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13935 (int)dd->irev);
13936
13937 /* speeds the hardware can support */
13938 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13939 /* speeds allowed to run at */
13940 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13941 /* give a reasonable active value, will be set on link up */
13942 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13943
13944 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13945 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13946 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13947 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13948 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13949 /* fix up link widths for emulation _p */
13950 ppd = dd->pport;
13951 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13952 ppd->link_width_supported =
13953 ppd->link_width_enabled =
13954 ppd->link_width_downgrade_supported =
13955 ppd->link_width_downgrade_enabled =
13956 OPA_LINK_WIDTH_1X;
13957 }
13958 /* ensure num_vls isn't larger than number of sdma engines */
13959 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
13960 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050013961 num_vls, dd->chip_sdma_engines);
13962 num_vls = dd->chip_sdma_engines;
13963 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013964 }
13965
13966 /*
13967 * Convert the ns parameter to the 64 * cclocks used in the CSR.
13968 * Limit the max if larger than the field holds. If timeout is
13969 * non-zero, then the calculated field will be at least 1.
13970 *
13971 * Must be after icode is set up - the cclock rate depends
13972 * on knowing the hardware being used.
13973 */
13974 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
13975 if (dd->rcv_intr_timeout_csr >
13976 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
13977 dd->rcv_intr_timeout_csr =
13978 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
13979 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
13980 dd->rcv_intr_timeout_csr = 1;
13981
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013982 /* needs to be done before we look for the peer device */
13983 read_guid(dd);
13984
13985 /* should this device init the ASIC block? */
13986 asic_should_init(dd);
13987
Mike Marciniszyn77241052015-07-30 15:17:43 -040013988 /* obtain chip sizes, reset chip CSRs */
13989 init_chip(dd);
13990
13991 /* read in the PCIe link speed information */
13992 ret = pcie_speeds(dd);
13993 if (ret)
13994 goto bail_cleanup;
13995
Mike Marciniszyn77241052015-07-30 15:17:43 -040013996 /* read in firmware */
13997 ret = hfi1_firmware_init(dd);
13998 if (ret)
13999 goto bail_cleanup;
14000
14001 /*
14002 * In general, the PCIe Gen3 transition must occur after the
14003 * chip has been idled (so it won't initiate any PCIe transactions
14004 * e.g. an interrupt) and before the driver changes any registers
14005 * (the transition will reset the registers).
14006 *
14007 * In particular, place this call after:
14008 * - init_chip() - the chip will not initiate any PCIe transactions
14009 * - pcie_speeds() - reads the current link speed
14010 * - hfi1_firmware_init() - the needed firmware is ready to be
14011 * downloaded
14012 */
14013 ret = do_pcie_gen3_transition(dd);
14014 if (ret)
14015 goto bail_cleanup;
14016
14017 /* start setting dd values and adjusting CSRs */
14018 init_early_variables(dd);
14019
14020 parse_platform_config(dd);
14021
Dean Luick5d9157a2015-11-16 21:59:34 -050014022 ret = obtain_boardname(dd);
14023 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014024 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014025
14026 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014027 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014028 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014029 (u32)dd->majrev,
14030 (u32)dd->minrev,
14031 (dd->revision >> CCE_REVISION_SW_SHIFT)
14032 & CCE_REVISION_SW_MASK);
14033
14034 ret = set_up_context_variables(dd);
14035 if (ret)
14036 goto bail_cleanup;
14037
14038 /* set initial RXE CSRs */
14039 init_rxe(dd);
14040 /* set initial TXE CSRs */
14041 init_txe(dd);
14042 /* set initial non-RXE, non-TXE CSRs */
14043 init_other(dd);
14044 /* set up KDETH QP prefix in both RX and TX CSRs */
14045 init_kdeth_qp(dd);
14046
14047 /* send contexts must be set up before receive contexts */
14048 ret = init_send_contexts(dd);
14049 if (ret)
14050 goto bail_cleanup;
14051
14052 ret = hfi1_create_ctxts(dd);
14053 if (ret)
14054 goto bail_cleanup;
14055
14056 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14057 /*
14058 * rcd[0] is guaranteed to be valid by this point. Also, all
14059 * contexts are using the same value, as per the module parameter.
14060 */
14061 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14062
14063 ret = init_pervl_scs(dd);
14064 if (ret)
14065 goto bail_cleanup;
14066
14067 /* sdma init */
14068 for (i = 0; i < dd->num_pports; ++i) {
14069 ret = sdma_init(dd, i);
14070 if (ret)
14071 goto bail_cleanup;
14072 }
14073
14074 /* use contexts created by hfi1_create_ctxts */
14075 ret = set_up_interrupts(dd);
14076 if (ret)
14077 goto bail_cleanup;
14078
14079 /* set up LCB access - must be after set_up_interrupts() */
14080 init_lcb_access(dd);
14081
14082 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14083 dd->base_guid & 0xFFFFFF);
14084
14085 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14086 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14087 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14088
14089 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14090 if (ret)
14091 goto bail_clear_intr;
14092 check_fabric_firmware_versions(dd);
14093
14094 thermal_init(dd);
14095
14096 ret = init_cntrs(dd);
14097 if (ret)
14098 goto bail_clear_intr;
14099
14100 ret = init_rcverr(dd);
14101 if (ret)
14102 goto bail_free_cntrs;
14103
14104 ret = eprom_init(dd);
14105 if (ret)
14106 goto bail_free_rcverr;
14107
14108 goto bail;
14109
14110bail_free_rcverr:
14111 free_rcverr(dd);
14112bail_free_cntrs:
14113 free_cntrs(dd);
14114bail_clear_intr:
14115 clean_up_interrupts(dd);
14116bail_cleanup:
14117 hfi1_pcie_ddcleanup(dd);
14118bail_free:
14119 hfi1_free_devdata(dd);
14120 dd = ERR_PTR(ret);
14121bail:
14122 return dd;
14123}
14124
14125static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14126 u32 dw_len)
14127{
14128 u32 delta_cycles;
14129 u32 current_egress_rate = ppd->current_egress_rate;
14130 /* rates here are in units of 10^6 bits/sec */
14131
14132 if (desired_egress_rate == -1)
14133 return 0; /* shouldn't happen */
14134
14135 if (desired_egress_rate >= current_egress_rate)
14136 return 0; /* we can't help it go faster, only slower */
14137
14138 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14139 egress_cycles(dw_len * 4, current_egress_rate);
14140
14141 return (u16)delta_cycles;
14142}
14143
14144
14145/**
14146 * create_pbc - build a pbc for transmission
14147 * @flags: special case flags OR-ed into the built PBC
14148 * @srate_mbs: static rate, in Mb/s
14149 * @vl: vl
14150 * @dw_len: dword length (header words + data words + pbc words)
14151 *
14152 * Create a PBC with the given flags, rate, VL, and length.
14153 *
14154 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14155 * for verbs, which does not use this PSM feature. The lone other caller
14156 * is for the diagnostic interface which calls this if the user does not
14157 * supply their own PBC.
14158 */
14159u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14160 u32 dw_len)
14161{
14162 u64 pbc, delay = 0;
14163
14164 if (unlikely(srate_mbs))
14165 delay = delay_cycles(ppd, srate_mbs, dw_len);
14166
14167 pbc = flags
14168 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14169 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14170 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14171 | (dw_len & PBC_LENGTH_DWS_MASK)
14172 << PBC_LENGTH_DWS_SHIFT;
14173
14174 return pbc;
14175}
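
/*
 * Illustrative sketch (a hypothetical call-site fragment, not part of
 * the driver): building a PBC for a VL0 packet with no special flags
 * and no static rate limiting.  Per the comment above, dw_len counts
 * header words, data words and the PBC itself; the variable names and
 * sizes are made up.
 */
#if 0
	u32 hdr_dwords = 8, data_dwords = 256, pbc_dwords = 2;
	u64 pbc = create_pbc(ppd, 0 /* flags */, 0 /* srate_mbs */,
			     0 /* vl */,
			     hdr_dwords + data_dwords + pbc_dwords);
#endif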
14176
14177#define SBUS_THERMAL 0x4f
14178#define SBUS_THERM_MONITOR_MODE 0x1
14179
14180#define THERM_FAILURE(dev, ret, reason) \
14181 dd_dev_err((dd), \
14182 "Thermal sensor initialization failed: %s (%d)\n", \
14183 (reason), (ret))
14184
14185/*
14186 * Initialize the Avago Thermal sensor.
14187 *
14188 * After initialization, enable polling of thermal sensor through
14189 * SBus interface. For this to work, the SBus Master
14190 * firmware has to be loaded because the HW polling
14191 * logic uses SBus interrupts, which are not supported with
14192 * default firmware. Otherwise, no data will be returned through
14193 * the ASIC_STS_THERM CSR.
14194 */
14195static int thermal_init(struct hfi1_devdata *dd)
14196{
14197 int ret = 0;
14198
14199 if (dd->icode != ICODE_RTL_SILICON ||
14200 !(dd->flags & HFI1_DO_INIT_ASIC))
14201 return ret;
14202
14203 acquire_hw_mutex(dd);
14204 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014205 /* Disable polling of thermal readings */
14206 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14207 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014208 /* Thermal Sensor Initialization */
14209 /* Step 1: Reset the Thermal SBus Receiver */
14210 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14211 RESET_SBUS_RECEIVER, 0);
14212 if (ret) {
14213 THERM_FAILURE(dd, ret, "Bus Reset");
14214 goto done;
14215 }
14216 /* Step 2: Set Reset bit in Thermal block */
14217 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14218 WRITE_SBUS_RECEIVER, 0x1);
14219 if (ret) {
14220 THERM_FAILURE(dd, ret, "Therm Block Reset");
14221 goto done;
14222 }
14223 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14224 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14225 WRITE_SBUS_RECEIVER, 0x32);
14226 if (ret) {
14227 THERM_FAILURE(dd, ret, "Write Clock Div");
14228 goto done;
14229 }
14230 /* Step 4: Select temperature mode */
14231 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14232 WRITE_SBUS_RECEIVER,
14233 SBUS_THERM_MONITOR_MODE);
14234 if (ret) {
14235 THERM_FAILURE(dd, ret, "Write Mode Sel");
14236 goto done;
14237 }
14238 /* Step 5: De-assert block reset and start conversion */
14239 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14240 WRITE_SBUS_RECEIVER, 0x2);
14241 if (ret) {
14242 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14243 goto done;
14244 }
14245 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14246 msleep(22);
14247
14248 /* Enable polling of thermal readings */
14249 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14250done:
14251 release_hw_mutex(dd);
14252 return ret;
14253}
14254
14255static void handle_temp_err(struct hfi1_devdata *dd)
14256{
14257 struct hfi1_pportdata *ppd = &dd->pport[0];
14258 /*
14259 * Thermal Critical Interrupt
14260 * Put the device into forced freeze mode, take link down to
14261 * offline, and put DC into reset.
14262 */
14263 dd_dev_emerg(dd,
14264 "Critical temperature reached! Forcing device into freeze mode!\n");
14265 dd->flags |= HFI1_FORCED_FREEZE;
14266 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14267 /*
14268 * Shut DC down as much and as quickly as possible.
14269 *
14270 * Step 1: Take the link down to OFFLINE. This will cause the
14271 * 8051 to put the Serdes in reset. However, we don't want to
14272 * go through the entire link state machine since we want to
14273 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14274 * but rather an attempt to save the chip.
14275 * Code below is almost the same as quiet_serdes() but avoids
14276 * all the extra work and the sleeps.
14277 */
14278 ppd->driver_link_ready = 0;
14279 ppd->link_enabled = 0;
14280 set_physical_link_state(dd, PLS_OFFLINE |
14281 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14282 /*
14283 * Step 2: Shutdown LCB and 8051
14284 * After shutdown, do not restore DC_CFG_RESET value.
14285 */
14286 dc_shutdown(dd);
14287}