/*
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
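/*
 * Worked example (illustrative only, not a driver default): the same
 * arithmetic for a 4K packet would be (4 * 1024 + 64) bytes / 12.5 GB/s
 * = 332.8 ns, so a comparable setting would be roughly (333 + 16) ns.
 */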

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
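
/*
 * Illustrative sketch only (not part of the driver; the helper name is
 * hypothetical): tables built from FLAG_ENTRY()/FLAG_ENTRY0() are meant
 * to be scanned like this to turn a raw error status value into readable
 * flag names.
 */
static inline void example_report_flags(struct hfi1_devdata *dd, u64 reg,
					struct flag_table *table, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (reg & table[i].flag)
			dd_dev_info(dd, "%s\n", table[i].str);
}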

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
#define NUM_MAP_REGS		32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE	2ull
#define QW_SHIFT	6ull
/* QPN[7..1] */
#define QPN_WIDTH	7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW	0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK	3ull
#define LRH_BTH_VALUE	2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW	0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK	128ull
#define LRH_SC_VALUE	0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
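
/*
 * Worked example of the offset encoding above: an offset is
 * (quad word << QW_SHIFT) | bit, so LRH_BTH_MATCH_OFFSET = (0 << 6) | 48
 * = 48, LRH_SC_MATCH_OFFSET = (0 << 6) | 56 = 56, and QPN_SELECT_OFFSET
 * = (1 << 6) | 1 = 65.
 */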

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
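
/*
 * Example use (the SC-to-VL assignments here are illustrative, not the
 * power-on defaults):
 *
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 *
 * packs SC0..SC7 -> VL 0, 0, 1, 1, 2, 2, 3, 3 by shifting each VL value
 * into its SC's field of the SendSC2VLT0 register layout.
 */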

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
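
/*
 * Illustrative sketch only (not the driver's actual clear-down routine,
 * and the function name is hypothetical): an err_reg_info entry is meant
 * to be used roughly like this - read the second tier status CSR, write
 * the observed bits back to the clear CSR, then hand them to the
 * per-block handler.
 */
static inline void example_clear_down(struct hfi1_devdata *dd,
				      const struct err_reg_info *eri,
				      u32 source)
{
	u64 reg = read_csr(dd, eri->status);

	write_csr(dd, eri->clear, reg);		/* acknowledge what we saw */
	if (reg && eri->handler)
		eri->handler(dd, source, reg);	/* decode with the sub-block handler */
}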

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
		handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
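/*
 * For example (illustrative only): an 8K MTU (8192 = 2^13) is a power of
 * two, so its cap encoding can be calculated from the MTU; 10240 = 10 *
 * 1024 is not, so it needs the constant above.
 */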

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *,
		       void *context,
		       int vl,
		       int mode,
		       u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

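/*
 * Each counter in the RXE/TXE/CCE counter arrays occupies one 64-bit CSR
 * slot, hence the "counter * 8 + <array base>" address arithmetic in the
 * helpers below.
 */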
/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx*0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	u64 val;

	if (dd->flags & HFI1_PRESENT) {
		val = readq((void __iomem *)dd->kregbase + offset);
		return val;
	}
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
1295
1296/* Dev Access */
1297static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1298 void *context, int vl, int mode, u64 data)
1299{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301300 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001301 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001302
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001303 if (entry->flags & CNTR_SDMA) {
1304 if (vl == CNTR_INVALID_VL)
1305 return 0;
1306 csr += 0x100 * vl;
1307 } else {
1308 if (vl != CNTR_INVALID_VL)
1309 return 0;
1310 }
1311 return read_write_csr(dd, csr, mode, data);
1312}
1313
1314static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1315 void *context, int idx, int mode, u64 data)
1316{
1317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1318
1319 if (dd->per_sdma && idx < dd->num_sdma)
1320 return dd->per_sdma[idx].err_cnt;
1321 return 0;
1322}
1323
1324static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1325 void *context, int idx, int mode, u64 data)
1326{
1327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1328
1329 if (dd->per_sdma && idx < dd->num_sdma)
1330 return dd->per_sdma[idx].sdma_int_cnt;
1331 return 0;
1332}
1333
1334static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1335 void *context, int idx, int mode, u64 data)
1336{
1337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1338
1339 if (dd->per_sdma && idx < dd->num_sdma)
1340 return dd->per_sdma[idx].idle_int_cnt;
1341 return 0;
1342}
1343
1344static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1345 void *context, int idx, int mode,
1346 u64 data)
1347{
1348 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1349
1350 if (dd->per_sdma && idx < dd->num_sdma)
1351 return dd->per_sdma[idx].progress_int_cnt;
1352 return 0;
1353}
1354
1355static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1356 int vl, int mode, u64 data)
1357{
1358	struct hfi1_devdata *dd = context;
1359
1360 u64 val = 0;
1361 u64 csr = entry->csr;
1362
1363 if (entry->flags & CNTR_VL) {
1364 if (vl == CNTR_INVALID_VL)
1365 return 0;
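		/* per-VL counters are consecutive 64-bit CSRs, 8 bytes per VL */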
1366 csr += 8 * vl;
1367 } else {
1368 if (vl != CNTR_INVALID_VL)
1369 return 0;
1370 }
1371
1372 val = read_write_csr(dd, csr, mode, data);
1373 return val;
1374}
1375
1376static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1377 int vl, int mode, u64 data)
1378{
1379	struct hfi1_devdata *dd = context;
1380	u32 csr = entry->csr;
1381 int ret = 0;
1382
1383 if (vl != CNTR_INVALID_VL)
1384 return 0;
1385 if (mode == CNTR_MODE_R)
1386 ret = read_lcb_csr(dd, csr, &data);
1387 else if (mode == CNTR_MODE_W)
1388 ret = write_lcb_csr(dd, csr, data);
1389
1390 if (ret) {
1391 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1392 return 0;
1393 }
1394
1395 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1396 return data;
1397}
1398
1399/* Port Access */
1400static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1401 int vl, int mode, u64 data)
1402{
1403	struct hfi1_pportdata *ppd = context;
1404
1405 if (vl != CNTR_INVALID_VL)
1406 return 0;
1407 return read_write_csr(ppd->dd, entry->csr, mode, data);
1408}
1409
1410static u64 port_access_u64_csr(const struct cntr_entry *entry,
1411 void *context, int vl, int mode, u64 data)
1412{
1413	struct hfi1_pportdata *ppd = context;
1414	u64 val;
1415 u64 csr = entry->csr;
1416
1417 if (entry->flags & CNTR_VL) {
1418 if (vl == CNTR_INVALID_VL)
1419 return 0;
1420 csr += 8 * vl;
1421 } else {
1422 if (vl != CNTR_INVALID_VL)
1423 return 0;
1424 }
1425 val = read_write_csr(ppd->dd, csr, mode, data);
1426 return val;
1427}
1428
1429/* Software defined */
1430static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1431 u64 data)
1432{
1433 u64 ret;
1434
1435 if (mode == CNTR_MODE_R) {
1436 ret = *cntr;
1437 } else if (mode == CNTR_MODE_W) {
1438 *cntr = data;
1439 ret = data;
1440 } else {
1441 dd_dev_err(dd, "Invalid cntr sw access mode");
1442 return 0;
1443 }
1444
1445 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1446
1447 return ret;
1448}
1449
1450static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1451 int vl, int mode, u64 data)
1452{
1453	struct hfi1_pportdata *ppd = context;
1454
1455 if (vl != CNTR_INVALID_VL)
1456 return 0;
1457 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1458}
1459
1460static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1461 int vl, int mode, u64 data)
1462{
1463	struct hfi1_pportdata *ppd = context;
1464
1465 if (vl != CNTR_INVALID_VL)
1466 return 0;
1467 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1468}
1469
1470static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1471 void *context, int vl, int mode,
1472 u64 data)
1473{
1474 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1475
1476 if (vl != CNTR_INVALID_VL)
1477 return 0;
1478 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1479}
1480
1481static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1482 void *context, int vl, int mode, u64 data)
1483{
1484	struct hfi1_pportdata *ppd = context;
1485
1486 if (vl != CNTR_INVALID_VL)
1487 return 0;
1488
1489 return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
1490}
1491
1492static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1493 void *context, int vl, int mode, u64 data)
1494{
1495	struct hfi1_pportdata *ppd = context;
1496
1497 if (vl != CNTR_INVALID_VL)
1498 return 0;
1499
1500 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1501 mode, data);
1502}
1503
1504static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1505 void *context, int vl, int mode, u64 data)
1506{
1507	struct hfi1_pportdata *ppd = context;
1508
1509 if (vl != CNTR_INVALID_VL)
1510 return 0;
1511
1512 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1513 mode, data);
1514}
1515
1516u64 get_all_cpu_total(u64 __percpu *cntr)
1517{
1518 int cpu;
1519 u64 counter = 0;
1520
1521 for_each_possible_cpu(cpu)
1522 counter += *per_cpu_ptr(cntr, cpu);
1523 return counter;
1524}
1525
1526static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1527 u64 __percpu *cntr,
1528 int vl, int mode, u64 data)
1529{
1530
1531 u64 ret = 0;
1532
1533 if (vl != CNTR_INVALID_VL)
1534 return 0;
1535
1536 if (mode == CNTR_MODE_R) {
1537 ret = get_all_cpu_total(cntr) - *z_val;
1538 } else if (mode == CNTR_MODE_W) {
1539 /* A write can only zero the counter */
1540 if (data == 0)
1541 *z_val = get_all_cpu_total(cntr);
1542 else
1543 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1544 } else {
1545 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1546 return 0;
1547 }
1548
1549 return ret;
1550}
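/*
 * Example (illustrative): the z_val baseline above is how per-CPU counters
 * are "zeroed" without touching the hot-path increments.  For the interrupt
 * counter, a read reports
 *
 *	get_all_cpu_total(dd->int_counter) - dd->z_int_counter
 *
 * and a write of zero simply snapshots the current total into
 * dd->z_int_counter, so the per-CPU variables themselves are only ever
 * written by the code that increments them.
 */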
1551
1552static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1553 void *context, int vl, int mode, u64 data)
1554{
1555	struct hfi1_devdata *dd = context;
1556
1557 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1558 mode, data);
1559}
1560
1561static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1562 void *context, int vl, int mode, u64 data)
1563{
1564	struct hfi1_devdata *dd = context;
1565
1566 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1567 mode, data);
1568}
1569
1570static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1571 void *context, int vl, int mode, u64 data)
1572{
1573	struct hfi1_devdata *dd = context;
1574
1575 return dd->verbs_dev.n_piowait;
1576}
1577
1578static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1579 void *context, int vl, int mode, u64 data)
1580{
1581	struct hfi1_devdata *dd = context;
1582
1583 return dd->verbs_dev.n_txwait;
1584}
1585
1586static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
1589	struct hfi1_devdata *dd = context;
1590
1591 return dd->verbs_dev.n_kmem_wait;
1592}
1593
1594static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
1597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1598
1599 return dd->verbs_dev.n_send_schedule;
1600}
1601
1602/* Software counters for the error status bits within MISC_ERR_STATUS */
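/*
 * Illustrative note: each accessor below just reports one element of
 * dd->misc_err_status_cnt[], indexed by the bit position of the
 * corresponding error in MISC_ERR_STATUS; the vl, mode and data arguments
 * are unused.  The counters themselves are presumed to be bumped by the
 * error-interrupt handling elsewhere in this file, roughly along the lines
 * of
 *
 *	for (i = 0; i < ARRAY_SIZE(dd->misc_err_status_cnt); i++)
 *		if (reg & (1ull << i))
 *			dd->misc_err_status_cnt[i]++;
 *
 * The same per-bit pattern applies to the CceErrStatus, RcvErrStatus,
 * SendPioErrStatus, SendDmaErrStatus and SendEgressErrStatus counter
 * groups that follow.
 */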
1603static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1604 void *context, int vl, int mode,
1605 u64 data)
1606{
1607 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1608
1609 return dd->misc_err_status_cnt[12];
1610}
1611
1612static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1613 void *context, int vl, int mode,
1614 u64 data)
1615{
1616 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1617
1618 return dd->misc_err_status_cnt[11];
1619}
1620
1621static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1622 void *context, int vl, int mode,
1623 u64 data)
1624{
1625 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1626
1627 return dd->misc_err_status_cnt[10];
1628}
1629
1630static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1631 void *context, int vl,
1632 int mode, u64 data)
1633{
1634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635
1636 return dd->misc_err_status_cnt[9];
1637}
1638
1639static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1640 void *context, int vl, int mode,
1641 u64 data)
1642{
1643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644
1645 return dd->misc_err_status_cnt[8];
1646}
1647
1648static u64 access_misc_efuse_read_bad_addr_err_cnt(
1649 const struct cntr_entry *entry,
1650 void *context, int vl, int mode, u64 data)
1651{
1652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653
1654 return dd->misc_err_status_cnt[7];
1655}
1656
1657static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1658 void *context, int vl,
1659 int mode, u64 data)
1660{
1661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662
1663 return dd->misc_err_status_cnt[6];
1664}
1665
1666static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1667 void *context, int vl, int mode,
1668 u64 data)
1669{
1670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671
1672 return dd->misc_err_status_cnt[5];
1673}
1674
1675static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1676 void *context, int vl, int mode,
1677 u64 data)
1678{
1679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681 return dd->misc_err_status_cnt[4];
1682}
1683
1684static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1685 void *context, int vl,
1686 int mode, u64 data)
1687{
1688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689
1690 return dd->misc_err_status_cnt[3];
1691}
1692
1693static u64 access_misc_csr_write_bad_addr_err_cnt(
1694 const struct cntr_entry *entry,
1695 void *context, int vl, int mode, u64 data)
1696{
1697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698
1699 return dd->misc_err_status_cnt[2];
1700}
1701
1702static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1703 void *context, int vl,
1704 int mode, u64 data)
1705{
1706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708 return dd->misc_err_status_cnt[1];
1709}
1710
1711static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1712 void *context, int vl, int mode,
1713 u64 data)
1714{
1715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717 return dd->misc_err_status_cnt[0];
1718}
1719
1720/*
1721 * Software counter for the aggregate of
1722 * individual CceErrStatus counters
1723 */
1724static u64 access_sw_cce_err_status_aggregated_cnt(
1725 const struct cntr_entry *entry,
1726 void *context, int vl, int mode, u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->sw_cce_err_status_aggregate;
1731}
1732
1733/*
1734 * Software counters corresponding to each of the
1735 * error status bits within CceErrStatus
1736 */
1737static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1738 void *context, int vl, int mode,
1739 u64 data)
1740{
1741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1742
1743 return dd->cce_err_status_cnt[40];
1744}
1745
1746static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1747 void *context, int vl, int mode,
1748 u64 data)
1749{
1750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1751
1752 return dd->cce_err_status_cnt[39];
1753}
1754
1755static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1756 void *context, int vl, int mode,
1757 u64 data)
1758{
1759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1760
1761 return dd->cce_err_status_cnt[38];
1762}
1763
1764static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1765 void *context, int vl, int mode,
1766 u64 data)
1767{
1768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769
1770 return dd->cce_err_status_cnt[37];
1771}
1772
1773static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1774 void *context, int vl, int mode,
1775 u64 data)
1776{
1777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778
1779 return dd->cce_err_status_cnt[36];
1780}
1781
1782static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1783 const struct cntr_entry *entry,
1784 void *context, int vl, int mode, u64 data)
1785{
1786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788 return dd->cce_err_status_cnt[35];
1789}
1790
1791static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1792 const struct cntr_entry *entry,
1793 void *context, int vl, int mode, u64 data)
1794{
1795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796
1797 return dd->cce_err_status_cnt[34];
1798}
1799
1800static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1801 void *context, int vl,
1802 int mode, u64 data)
1803{
1804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805
1806 return dd->cce_err_status_cnt[33];
1807}
1808
1809static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1810 void *context, int vl, int mode,
1811 u64 data)
1812{
1813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814
1815 return dd->cce_err_status_cnt[32];
1816}
1817
1818static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1819 void *context, int vl, int mode, u64 data)
1820{
1821 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1822
1823 return dd->cce_err_status_cnt[31];
1824}
1825
1826static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1827 void *context, int vl, int mode,
1828 u64 data)
1829{
1830 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1831
1832 return dd->cce_err_status_cnt[30];
1833}
1834
1835static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1836 void *context, int vl, int mode,
1837 u64 data)
1838{
1839 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841 return dd->cce_err_status_cnt[29];
1842}
1843
1844static u64 access_pcic_transmit_back_parity_err_cnt(
1845 const struct cntr_entry *entry,
1846 void *context, int vl, int mode, u64 data)
1847{
1848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849
1850 return dd->cce_err_status_cnt[28];
1851}
1852
1853static u64 access_pcic_transmit_front_parity_err_cnt(
1854 const struct cntr_entry *entry,
1855 void *context, int vl, int mode, u64 data)
1856{
1857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858
1859 return dd->cce_err_status_cnt[27];
1860}
1861
1862static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1863 void *context, int vl, int mode,
1864 u64 data)
1865{
1866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867
1868 return dd->cce_err_status_cnt[26];
1869}
1870
1871static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1872 void *context, int vl, int mode,
1873 u64 data)
1874{
1875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876
1877 return dd->cce_err_status_cnt[25];
1878}
1879
1880static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1881 void *context, int vl, int mode,
1882 u64 data)
1883{
1884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885
1886 return dd->cce_err_status_cnt[24];
1887}
1888
1889static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1890 void *context, int vl, int mode,
1891 u64 data)
1892{
1893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894
1895 return dd->cce_err_status_cnt[23];
1896}
1897
1898static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1899 void *context, int vl,
1900 int mode, u64 data)
1901{
1902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903
1904 return dd->cce_err_status_cnt[22];
1905}
1906
1907static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1908 void *context, int vl, int mode,
1909 u64 data)
1910{
1911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912
1913 return dd->cce_err_status_cnt[21];
1914}
1915
1916static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1917 const struct cntr_entry *entry,
1918 void *context, int vl, int mode, u64 data)
1919{
1920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921
1922 return dd->cce_err_status_cnt[20];
1923}
1924
1925static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1926 void *context, int vl,
1927 int mode, u64 data)
1928{
1929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930
1931 return dd->cce_err_status_cnt[19];
1932}
1933
1934static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1935 void *context, int vl, int mode,
1936 u64 data)
1937{
1938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939
1940 return dd->cce_err_status_cnt[18];
1941}
1942
1943static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1944 void *context, int vl, int mode,
1945 u64 data)
1946{
1947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948
1949 return dd->cce_err_status_cnt[17];
1950}
1951
1952static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1953 void *context, int vl, int mode,
1954 u64 data)
1955{
1956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957
1958 return dd->cce_err_status_cnt[16];
1959}
1960
1961static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1962 void *context, int vl, int mode,
1963 u64 data)
1964{
1965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966
1967 return dd->cce_err_status_cnt[15];
1968}
1969
1970static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1971 void *context, int vl,
1972 int mode, u64 data)
1973{
1974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975
1976 return dd->cce_err_status_cnt[14];
1977}
1978
1979static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1980 void *context, int vl, int mode,
1981 u64 data)
1982{
1983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984
1985 return dd->cce_err_status_cnt[13];
1986}
1987
1988static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
1989 const struct cntr_entry *entry,
1990 void *context, int vl, int mode, u64 data)
1991{
1992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993
1994 return dd->cce_err_status_cnt[12];
1995}
1996
1997static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
1998 const struct cntr_entry *entry,
1999 void *context, int vl, int mode, u64 data)
2000{
2001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002
2003 return dd->cce_err_status_cnt[11];
2004}
2005
2006static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2007 const struct cntr_entry *entry,
2008 void *context, int vl, int mode, u64 data)
2009{
2010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011
2012 return dd->cce_err_status_cnt[10];
2013}
2014
2015static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2016 const struct cntr_entry *entry,
2017 void *context, int vl, int mode, u64 data)
2018{
2019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020
2021 return dd->cce_err_status_cnt[9];
2022}
2023
2024static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2025 const struct cntr_entry *entry,
2026 void *context, int vl, int mode, u64 data)
2027{
2028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029
2030 return dd->cce_err_status_cnt[8];
2031}
2032
2033static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2034 void *context, int vl,
2035 int mode, u64 data)
2036{
2037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038
2039 return dd->cce_err_status_cnt[7];
2040}
2041
2042static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2043 const struct cntr_entry *entry,
2044 void *context, int vl, int mode, u64 data)
2045{
2046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047
2048 return dd->cce_err_status_cnt[6];
2049}
2050
2051static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2052 void *context, int vl, int mode,
2053 u64 data)
2054{
2055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056
2057 return dd->cce_err_status_cnt[5];
2058}
2059
2060static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2061 void *context, int vl, int mode,
2062 u64 data)
2063{
2064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066 return dd->cce_err_status_cnt[4];
2067}
2068
2069static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2070 const struct cntr_entry *entry,
2071 void *context, int vl, int mode, u64 data)
2072{
2073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075 return dd->cce_err_status_cnt[3];
2076}
2077
2078static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2079 void *context, int vl,
2080 int mode, u64 data)
2081{
2082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084 return dd->cce_err_status_cnt[2];
2085}
2086
2087static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2088 void *context, int vl,
2089 int mode, u64 data)
2090{
2091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093 return dd->cce_err_status_cnt[1];
2094}
2095
2096static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2097 void *context, int vl, int mode,
2098 u64 data)
2099{
2100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102 return dd->cce_err_status_cnt[0];
2103}
2104
2105/*
2106 * Software counters corresponding to each of the
2107 * error status bits within RcvErrStatus
2108 */
2109static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl, int mode,
2111 u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->rcv_err_status_cnt[63];
2116}
2117
2118static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2119 void *context, int vl,
2120 int mode, u64 data)
2121{
2122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124 return dd->rcv_err_status_cnt[62];
2125}
2126
2127static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2128 void *context, int vl, int mode,
2129 u64 data)
2130{
2131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133 return dd->rcv_err_status_cnt[61];
2134}
2135
2136static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2137 void *context, int vl, int mode,
2138 u64 data)
2139{
2140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142 return dd->rcv_err_status_cnt[60];
2143}
2144
2145static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2146 void *context, int vl,
2147 int mode, u64 data)
2148{
2149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151 return dd->rcv_err_status_cnt[59];
2152}
2153
2154static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2155 void *context, int vl,
2156 int mode, u64 data)
2157{
2158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160 return dd->rcv_err_status_cnt[58];
2161}
2162
2163static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2164 void *context, int vl, int mode,
2165 u64 data)
2166{
2167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169 return dd->rcv_err_status_cnt[57];
2170}
2171
2172static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2173 void *context, int vl, int mode,
2174 u64 data)
2175{
2176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178 return dd->rcv_err_status_cnt[56];
2179}
2180
2181static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2182 void *context, int vl, int mode,
2183 u64 data)
2184{
2185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187 return dd->rcv_err_status_cnt[55];
2188}
2189
2190static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2191 const struct cntr_entry *entry,
2192 void *context, int vl, int mode, u64 data)
2193{
2194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196 return dd->rcv_err_status_cnt[54];
2197}
2198
2199static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2200 const struct cntr_entry *entry,
2201 void *context, int vl, int mode, u64 data)
2202{
2203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205 return dd->rcv_err_status_cnt[53];
2206}
2207
2208static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2209 void *context, int vl,
2210 int mode, u64 data)
2211{
2212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214 return dd->rcv_err_status_cnt[52];
2215}
2216
2217static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2218 void *context, int vl,
2219 int mode, u64 data)
2220{
2221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223 return dd->rcv_err_status_cnt[51];
2224}
2225
2226static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2227 void *context, int vl,
2228 int mode, u64 data)
2229{
2230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231
2232 return dd->rcv_err_status_cnt[50];
2233}
2234
2235static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2236 void *context, int vl,
2237 int mode, u64 data)
2238{
2239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240
2241 return dd->rcv_err_status_cnt[49];
2242}
2243
2244static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2245 void *context, int vl,
2246 int mode, u64 data)
2247{
2248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249
2250 return dd->rcv_err_status_cnt[48];
2251}
2252
2253static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2254 void *context, int vl,
2255 int mode, u64 data)
2256{
2257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258
2259 return dd->rcv_err_status_cnt[47];
2260}
2261
2262static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2263 void *context, int vl, int mode,
2264 u64 data)
2265{
2266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267
2268 return dd->rcv_err_status_cnt[46];
2269}
2270
2271static u64 access_rx_hq_intr_csr_parity_err_cnt(
2272 const struct cntr_entry *entry,
2273 void *context, int vl, int mode, u64 data)
2274{
2275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276
2277 return dd->rcv_err_status_cnt[45];
2278}
2279
2280static u64 access_rx_lookup_csr_parity_err_cnt(
2281 const struct cntr_entry *entry,
2282 void *context, int vl, int mode, u64 data)
2283{
2284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285
2286 return dd->rcv_err_status_cnt[44];
2287}
2288
2289static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2290 const struct cntr_entry *entry,
2291 void *context, int vl, int mode, u64 data)
2292{
2293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294
2295 return dd->rcv_err_status_cnt[43];
2296}
2297
2298static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2299 const struct cntr_entry *entry,
2300 void *context, int vl, int mode, u64 data)
2301{
2302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303
2304 return dd->rcv_err_status_cnt[42];
2305}
2306
2307static u64 access_rx_lookup_des_part2_parity_err_cnt(
2308 const struct cntr_entry *entry,
2309 void *context, int vl, int mode, u64 data)
2310{
2311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312
2313 return dd->rcv_err_status_cnt[41];
2314}
2315
2316static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2317 const struct cntr_entry *entry,
2318 void *context, int vl, int mode, u64 data)
2319{
2320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321
2322 return dd->rcv_err_status_cnt[40];
2323}
2324
2325static u64 access_rx_lookup_des_part1_unc_err_cnt(
2326 const struct cntr_entry *entry,
2327 void *context, int vl, int mode, u64 data)
2328{
2329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330
2331 return dd->rcv_err_status_cnt[39];
2332}
2333
2334static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2335 const struct cntr_entry *entry,
2336 void *context, int vl, int mode, u64 data)
2337{
2338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339
2340 return dd->rcv_err_status_cnt[38];
2341}
2342
2343static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2344 const struct cntr_entry *entry,
2345 void *context, int vl, int mode, u64 data)
2346{
2347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348
2349 return dd->rcv_err_status_cnt[37];
2350}
2351
2352static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2353 const struct cntr_entry *entry,
2354 void *context, int vl, int mode, u64 data)
2355{
2356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357
2358 return dd->rcv_err_status_cnt[36];
2359}
2360
2361static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2362 const struct cntr_entry *entry,
2363 void *context, int vl, int mode, u64 data)
2364{
2365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366
2367 return dd->rcv_err_status_cnt[35];
2368}
2369
2370static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2371 const struct cntr_entry *entry,
2372 void *context, int vl, int mode, u64 data)
2373{
2374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375
2376 return dd->rcv_err_status_cnt[34];
2377}
2378
2379static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2380 const struct cntr_entry *entry,
2381 void *context, int vl, int mode, u64 data)
2382{
2383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384
2385 return dd->rcv_err_status_cnt[33];
2386}
2387
2388static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2389 void *context, int vl, int mode,
2390 u64 data)
2391{
2392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393
2394 return dd->rcv_err_status_cnt[32];
2395}
2396
2397static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2398 void *context, int vl, int mode,
2399 u64 data)
2400{
2401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402
2403 return dd->rcv_err_status_cnt[31];
2404}
2405
2406static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2407 void *context, int vl, int mode,
2408 u64 data)
2409{
2410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411
2412 return dd->rcv_err_status_cnt[30];
2413}
2414
2415static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2416 void *context, int vl, int mode,
2417 u64 data)
2418{
2419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420
2421 return dd->rcv_err_status_cnt[29];
2422}
2423
2424static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2425 void *context, int vl,
2426 int mode, u64 data)
2427{
2428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429
2430 return dd->rcv_err_status_cnt[28];
2431}
2432
2433static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2434 const struct cntr_entry *entry,
2435 void *context, int vl, int mode, u64 data)
2436{
2437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438
2439 return dd->rcv_err_status_cnt[27];
2440}
2441
2442static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2443 const struct cntr_entry *entry,
2444 void *context, int vl, int mode, u64 data)
2445{
2446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447
2448 return dd->rcv_err_status_cnt[26];
2449}
2450
2451static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2452 const struct cntr_entry *entry,
2453 void *context, int vl, int mode, u64 data)
2454{
2455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456
2457 return dd->rcv_err_status_cnt[25];
2458}
2459
2460static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2461 const struct cntr_entry *entry,
2462 void *context, int vl, int mode, u64 data)
2463{
2464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465
2466 return dd->rcv_err_status_cnt[24];
2467}
2468
2469static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2470 const struct cntr_entry *entry,
2471 void *context, int vl, int mode, u64 data)
2472{
2473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474
2475 return dd->rcv_err_status_cnt[23];
2476}
2477
2478static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2479 const struct cntr_entry *entry,
2480 void *context, int vl, int mode, u64 data)
2481{
2482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483
2484 return dd->rcv_err_status_cnt[22];
2485}
2486
2487static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2488 const struct cntr_entry *entry,
2489 void *context, int vl, int mode, u64 data)
2490{
2491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492
2493 return dd->rcv_err_status_cnt[21];
2494}
2495
2496static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2497 const struct cntr_entry *entry,
2498 void *context, int vl, int mode, u64 data)
2499{
2500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501
2502 return dd->rcv_err_status_cnt[20];
2503}
2504
2505static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2506 const struct cntr_entry *entry,
2507 void *context, int vl, int mode, u64 data)
2508{
2509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510
2511 return dd->rcv_err_status_cnt[19];
2512}
2513
2514static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2515 void *context, int vl,
2516 int mode, u64 data)
2517{
2518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519
2520 return dd->rcv_err_status_cnt[18];
2521}
2522
2523static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2524 void *context, int vl,
2525 int mode, u64 data)
2526{
2527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528
2529 return dd->rcv_err_status_cnt[17];
2530}
2531
2532static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2533 const struct cntr_entry *entry,
2534 void *context, int vl, int mode, u64 data)
2535{
2536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537
2538 return dd->rcv_err_status_cnt[16];
2539}
2540
2541static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2542 const struct cntr_entry *entry,
2543 void *context, int vl, int mode, u64 data)
2544{
2545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546
2547 return dd->rcv_err_status_cnt[15];
2548}
2549
2550static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2551 void *context, int vl,
2552 int mode, u64 data)
2553{
2554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555
2556 return dd->rcv_err_status_cnt[14];
2557}
2558
2559static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2560 void *context, int vl,
2561 int mode, u64 data)
2562{
2563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564
2565 return dd->rcv_err_status_cnt[13];
2566}
2567
2568static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2569 void *context, int vl, int mode,
2570 u64 data)
2571{
2572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573
2574 return dd->rcv_err_status_cnt[12];
2575}
2576
2577static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2578 void *context, int vl, int mode,
2579 u64 data)
2580{
2581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582
2583 return dd->rcv_err_status_cnt[11];
2584}
2585
2586static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2587 void *context, int vl, int mode,
2588 u64 data)
2589{
2590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591
2592 return dd->rcv_err_status_cnt[10];
2593}
2594
2595static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2596 void *context, int vl, int mode,
2597 u64 data)
2598{
2599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600
2601 return dd->rcv_err_status_cnt[9];
2602}
2603
2604static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2605 void *context, int vl, int mode,
2606 u64 data)
2607{
2608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609
2610 return dd->rcv_err_status_cnt[8];
2611}
2612
2613static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2614 const struct cntr_entry *entry,
2615 void *context, int vl, int mode, u64 data)
2616{
2617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618
2619 return dd->rcv_err_status_cnt[7];
2620}
2621
2622static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2623 const struct cntr_entry *entry,
2624 void *context, int vl, int mode, u64 data)
2625{
2626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627
2628 return dd->rcv_err_status_cnt[6];
2629}
2630
2631static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2632 void *context, int vl, int mode,
2633 u64 data)
2634{
2635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636
2637 return dd->rcv_err_status_cnt[5];
2638}
2639
2640static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2641 void *context, int vl, int mode,
2642 u64 data)
2643{
2644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646 return dd->rcv_err_status_cnt[4];
2647}
2648
2649static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2650 void *context, int vl, int mode,
2651 u64 data)
2652{
2653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655 return dd->rcv_err_status_cnt[3];
2656}
2657
2658static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2659 void *context, int vl, int mode,
2660 u64 data)
2661{
2662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664 return dd->rcv_err_status_cnt[2];
2665}
2666
2667static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2668 void *context, int vl, int mode,
2669 u64 data)
2670{
2671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673 return dd->rcv_err_status_cnt[1];
2674}
2675
2676static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2677 void *context, int vl, int mode,
2678 u64 data)
2679{
2680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682 return dd->rcv_err_status_cnt[0];
2683}
2684
2685/*
2686 * Software counters corresponding to each of the
2687 * error status bits within SendPioErrStatus
2688 */
2689static u64 access_pio_pec_sop_head_parity_err_cnt(
2690 const struct cntr_entry *entry,
2691 void *context, int vl, int mode, u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->send_pio_err_status_cnt[35];
2696}
2697
2698static u64 access_pio_pcc_sop_head_parity_err_cnt(
2699 const struct cntr_entry *entry,
2700 void *context, int vl, int mode, u64 data)
2701{
2702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704 return dd->send_pio_err_status_cnt[34];
2705}
2706
2707static u64 access_pio_last_returned_cnt_parity_err_cnt(
2708 const struct cntr_entry *entry,
2709 void *context, int vl, int mode, u64 data)
2710{
2711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713 return dd->send_pio_err_status_cnt[33];
2714}
2715
2716static u64 access_pio_current_free_cnt_parity_err_cnt(
2717 const struct cntr_entry *entry,
2718 void *context, int vl, int mode, u64 data)
2719{
2720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722 return dd->send_pio_err_status_cnt[32];
2723}
2724
2725static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2726 void *context, int vl, int mode,
2727 u64 data)
2728{
2729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731 return dd->send_pio_err_status_cnt[31];
2732}
2733
2734static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2735 void *context, int vl, int mode,
2736 u64 data)
2737{
2738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740 return dd->send_pio_err_status_cnt[30];
2741}
2742
2743static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2744 void *context, int vl, int mode,
2745 u64 data)
2746{
2747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749 return dd->send_pio_err_status_cnt[29];
2750}
2751
2752static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2753 const struct cntr_entry *entry,
2754 void *context, int vl, int mode, u64 data)
2755{
2756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758 return dd->send_pio_err_status_cnt[28];
2759}
2760
2761static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2762 void *context, int vl, int mode,
2763 u64 data)
2764{
2765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767 return dd->send_pio_err_status_cnt[27];
2768}
2769
2770static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2771 void *context, int vl, int mode,
2772 u64 data)
2773{
2774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776 return dd->send_pio_err_status_cnt[26];
2777}
2778
2779static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2780 void *context, int vl,
2781 int mode, u64 data)
2782{
2783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785 return dd->send_pio_err_status_cnt[25];
2786}
2787
2788static u64 access_pio_block_qw_count_parity_err_cnt(
2789 const struct cntr_entry *entry,
2790 void *context, int vl, int mode, u64 data)
2791{
2792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794 return dd->send_pio_err_status_cnt[24];
2795}
2796
2797static u64 access_pio_write_qw_valid_parity_err_cnt(
2798 const struct cntr_entry *entry,
2799 void *context, int vl, int mode, u64 data)
2800{
2801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803 return dd->send_pio_err_status_cnt[23];
2804}
2805
2806static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2807 void *context, int vl, int mode,
2808 u64 data)
2809{
2810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811
2812 return dd->send_pio_err_status_cnt[22];
2813}
2814
2815static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2816 void *context, int vl,
2817 int mode, u64 data)
2818{
2819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820
2821 return dd->send_pio_err_status_cnt[21];
2822}
2823
2824static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2825 void *context, int vl,
2826 int mode, u64 data)
2827{
2828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829
2830 return dd->send_pio_err_status_cnt[20];
2831}
2832
2833static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2834 void *context, int vl,
2835 int mode, u64 data)
2836{
2837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838
2839 return dd->send_pio_err_status_cnt[19];
2840}
2841
2842static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2843 const struct cntr_entry *entry,
2844 void *context, int vl, int mode, u64 data)
2845{
2846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847
2848 return dd->send_pio_err_status_cnt[18];
2849}
2850
2851static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2852 void *context, int vl, int mode,
2853 u64 data)
2854{
2855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856
2857 return dd->send_pio_err_status_cnt[17];
2858}
2859
2860static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2861 void *context, int vl, int mode,
2862 u64 data)
2863{
2864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865
2866 return dd->send_pio_err_status_cnt[16];
2867}
2868
2869static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2870 const struct cntr_entry *entry,
2871 void *context, int vl, int mode, u64 data)
2872{
2873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874
2875 return dd->send_pio_err_status_cnt[15];
2876}
2877
2878static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2879 const struct cntr_entry *entry,
2880 void *context, int vl, int mode, u64 data)
2881{
2882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883
2884 return dd->send_pio_err_status_cnt[14];
2885}
2886
2887static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2888 const struct cntr_entry *entry,
2889 void *context, int vl, int mode, u64 data)
2890{
2891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892
2893 return dd->send_pio_err_status_cnt[13];
2894}
2895
2896static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2897 const struct cntr_entry *entry,
2898 void *context, int vl, int mode, u64 data)
2899{
2900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901
2902 return dd->send_pio_err_status_cnt[12];
2903}
2904
2905static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2906 const struct cntr_entry *entry,
2907 void *context, int vl, int mode, u64 data)
2908{
2909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910
2911 return dd->send_pio_err_status_cnt[11];
2912}
2913
2914static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2915 const struct cntr_entry *entry,
2916 void *context, int vl, int mode, u64 data)
2917{
2918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919
2920 return dd->send_pio_err_status_cnt[10];
2921}
2922
2923static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2924 const struct cntr_entry *entry,
2925 void *context, int vl, int mode, u64 data)
2926{
2927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928
2929 return dd->send_pio_err_status_cnt[9];
2930}
2931
2932static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2933 const struct cntr_entry *entry,
2934 void *context, int vl, int mode, u64 data)
2935{
2936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937
2938 return dd->send_pio_err_status_cnt[8];
2939}
2940
2941static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2942 const struct cntr_entry *entry,
2943 void *context, int vl, int mode, u64 data)
2944{
2945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946
2947 return dd->send_pio_err_status_cnt[7];
2948}
2949
2950static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2951 void *context, int vl, int mode,
2952 u64 data)
2953{
2954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955
2956 return dd->send_pio_err_status_cnt[6];
2957}
2958
2959static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2960 void *context, int vl, int mode,
2961 u64 data)
2962{
2963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964
2965 return dd->send_pio_err_status_cnt[5];
2966}
2967
2968static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2969 void *context, int vl, int mode,
2970 u64 data)
2971{
2972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974 return dd->send_pio_err_status_cnt[4];
2975}
2976
2977static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2978 void *context, int vl, int mode,
2979 u64 data)
2980{
2981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983 return dd->send_pio_err_status_cnt[3];
2984}
2985
2986static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2987 void *context, int vl, int mode,
2988 u64 data)
2989{
2990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992 return dd->send_pio_err_status_cnt[2];
2993}
2994
2995static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
2996 void *context, int vl,
2997 int mode, u64 data)
2998{
2999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001 return dd->send_pio_err_status_cnt[1];
3002}
3003
3004static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3005 void *context, int vl, int mode,
3006 u64 data)
3007{
3008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009
3010 return dd->send_pio_err_status_cnt[0];
3011}
3012
3013/*
3014 * Software counters corresponding to each of the
3015 * error status bits within SendDmaErrStatus
3016 */
3017static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3018 const struct cntr_entry *entry,
3019 void *context, int vl, int mode, u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_dma_err_status_cnt[3];
3024}
3025
3026static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3027 const struct cntr_entry *entry,
3028 void *context, int vl, int mode, u64 data)
3029{
3030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032 return dd->send_dma_err_status_cnt[2];
3033}
3034
3035static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3036 void *context, int vl, int mode,
3037 u64 data)
3038{
3039 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041 return dd->send_dma_err_status_cnt[1];
3042}
3043
3044static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3045 void *context, int vl, int mode,
3046 u64 data)
3047{
3048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050 return dd->send_dma_err_status_cnt[0];
3051}
3052
3053/*
3054 * Software counters corresponding to each of the
3055 * error status bits within SendEgressErrStatus
3056 */
3057static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3058 const struct cntr_entry *entry,
3059 void *context, int vl, int mode, u64 data)
3060{
3061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3062
3063 return dd->send_egress_err_status_cnt[63];
3064}
3065
3066static u64 access_tx_read_sdma_memory_csr_err_cnt(
3067 const struct cntr_entry *entry,
3068 void *context, int vl, int mode, u64 data)
3069{
3070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3071
3072 return dd->send_egress_err_status_cnt[62];
3073}
3074
3075static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3076 void *context, int vl, int mode,
3077 u64 data)
3078{
3079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080
3081 return dd->send_egress_err_status_cnt[61];
3082}
3083
3084static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3085 void *context, int vl,
3086 int mode, u64 data)
3087{
3088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090 return dd->send_egress_err_status_cnt[60];
3091}
3092
3093static u64 access_tx_read_sdma_memory_cor_err_cnt(
3094 const struct cntr_entry *entry,
3095 void *context, int vl, int mode, u64 data)
3096{
3097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099 return dd->send_egress_err_status_cnt[59];
3100}
3101
3102static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3103 void *context, int vl, int mode,
3104 u64 data)
3105{
3106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108 return dd->send_egress_err_status_cnt[58];
3109}
3110
3111static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3112 void *context, int vl, int mode,
3113 u64 data)
3114{
3115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116
3117 return dd->send_egress_err_status_cnt[57];
3118}
3119
3120static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3121 void *context, int vl, int mode,
3122 u64 data)
3123{
3124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125
3126 return dd->send_egress_err_status_cnt[56];
3127}
3128
3129static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3130 void *context, int vl, int mode,
3131 u64 data)
3132{
3133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134
3135 return dd->send_egress_err_status_cnt[55];
3136}
3137
3138static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3139 void *context, int vl, int mode,
3140 u64 data)
3141{
3142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144 return dd->send_egress_err_status_cnt[54];
3145}
3146
3147static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3148 void *context, int vl, int mode,
3149 u64 data)
3150{
3151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153 return dd->send_egress_err_status_cnt[53];
3154}
3155
3156static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3157 void *context, int vl, int mode,
3158 u64 data)
3159{
3160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162 return dd->send_egress_err_status_cnt[52];
3163}
3164
3165static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3166 void *context, int vl, int mode,
3167 u64 data)
3168{
3169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171 return dd->send_egress_err_status_cnt[51];
3172}
3173
3174static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3175 void *context, int vl, int mode,
3176 u64 data)
3177{
3178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179
3180 return dd->send_egress_err_status_cnt[50];
3181}
3182
3183static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3184 void *context, int vl, int mode,
3185 u64 data)
3186{
3187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188
3189 return dd->send_egress_err_status_cnt[49];
3190}
3191
3192static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3193 void *context, int vl, int mode,
3194 u64 data)
3195{
3196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197
3198 return dd->send_egress_err_status_cnt[48];
3199}
3200
3201static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3202 void *context, int vl, int mode,
3203 u64 data)
3204{
3205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206
3207 return dd->send_egress_err_status_cnt[47];
3208}
3209
3210static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3211 void *context, int vl, int mode,
3212 u64 data)
3213{
3214 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215
3216 return dd->send_egress_err_status_cnt[46];
3217}
3218
3219static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3220 void *context, int vl, int mode,
3221 u64 data)
3222{
3223 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224
3225 return dd->send_egress_err_status_cnt[45];
3226}
3227
3228static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl,
3230 int mode, u64 data)
3231{
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234 return dd->send_egress_err_status_cnt[44];
3235}
3236
3237static u64 access_tx_read_sdma_memory_unc_err_cnt(
3238 const struct cntr_entry *entry,
3239 void *context, int vl, int mode, u64 data)
3240{
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243 return dd->send_egress_err_status_cnt[43];
3244}
3245
3246static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl, int mode,
3248 u64 data)
3249{
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252 return dd->send_egress_err_status_cnt[42];
3253}
3254
3255static u64 access_tx_credit_return_parity_err_cnt(
3256 const struct cntr_entry *entry,
3257 void *context, int vl, int mode, u64 data)
3258{
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261 return dd->send_egress_err_status_cnt[41];
3262}
3263
3264static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3265 const struct cntr_entry *entry,
3266 void *context, int vl, int mode, u64 data)
3267{
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270 return dd->send_egress_err_status_cnt[40];
3271}
3272
3273static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3274 const struct cntr_entry *entry,
3275 void *context, int vl, int mode, u64 data)
3276{
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279 return dd->send_egress_err_status_cnt[39];
3280}
3281
3282static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3283 const struct cntr_entry *entry,
3284 void *context, int vl, int mode, u64 data)
3285{
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288 return dd->send_egress_err_status_cnt[38];
3289}
3290
3291static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3292 const struct cntr_entry *entry,
3293 void *context, int vl, int mode, u64 data)
3294{
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297 return dd->send_egress_err_status_cnt[37];
3298}
3299
3300static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3301 const struct cntr_entry *entry,
3302 void *context, int vl, int mode, u64 data)
3303{
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306 return dd->send_egress_err_status_cnt[36];
3307}
3308
3309static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3310 const struct cntr_entry *entry,
3311 void *context, int vl, int mode, u64 data)
3312{
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315 return dd->send_egress_err_status_cnt[35];
3316}
3317
3318static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3319 const struct cntr_entry *entry,
3320 void *context, int vl, int mode, u64 data)
3321{
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324 return dd->send_egress_err_status_cnt[34];
3325}
3326
3327static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3328 const struct cntr_entry *entry,
3329 void *context, int vl, int mode, u64 data)
3330{
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333 return dd->send_egress_err_status_cnt[33];
3334}
3335
3336static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3337 const struct cntr_entry *entry,
3338 void *context, int vl, int mode, u64 data)
3339{
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342 return dd->send_egress_err_status_cnt[32];
3343}
3344
3345static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3348{
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351 return dd->send_egress_err_status_cnt[31];
3352}
3353
3354static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3355 const struct cntr_entry *entry,
3356 void *context, int vl, int mode, u64 data)
3357{
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360 return dd->send_egress_err_status_cnt[30];
3361}
3362
3363static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3366{
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369 return dd->send_egress_err_status_cnt[29];
3370}
3371
3372static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3375{
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378 return dd->send_egress_err_status_cnt[28];
3379}
3380
3381static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3384{
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387 return dd->send_egress_err_status_cnt[27];
3388}
3389
3390static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3393{
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396 return dd->send_egress_err_status_cnt[26];
3397}
3398
3399static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3402{
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405 return dd->send_egress_err_status_cnt[25];
3406}
3407
3408static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3411{
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414 return dd->send_egress_err_status_cnt[24];
3415}
3416
3417static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3420{
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423 return dd->send_egress_err_status_cnt[23];
3424}
3425
3426static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3429{
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432 return dd->send_egress_err_status_cnt[22];
3433}
3434
3435static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3438{
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441 return dd->send_egress_err_status_cnt[21];
3442}
3443
3444static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3447{
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450 return dd->send_egress_err_status_cnt[20];
3451}
3452
3453static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3456{
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459 return dd->send_egress_err_status_cnt[19];
3460}
3461
3462static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3465{
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468 return dd->send_egress_err_status_cnt[18];
3469}
3470
3471static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3474{
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477 return dd->send_egress_err_status_cnt[17];
3478}
3479
3480static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3483{
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486 return dd->send_egress_err_status_cnt[16];
3487}
3488
3489static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3490 void *context, int vl, int mode,
3491 u64 data)
3492{
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495 return dd->send_egress_err_status_cnt[15];
3496}
3497
3498static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3499 void *context, int vl,
3500 int mode, u64 data)
3501{
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504 return dd->send_egress_err_status_cnt[14];
3505}
3506
3507static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3508 void *context, int vl, int mode,
3509 u64 data)
3510{
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513 return dd->send_egress_err_status_cnt[13];
3514}
3515
3516static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3517 void *context, int vl, int mode,
3518 u64 data)
3519{
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522 return dd->send_egress_err_status_cnt[12];
3523}
3524
3525static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3526 const struct cntr_entry *entry,
3527 void *context, int vl, int mode, u64 data)
3528{
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531 return dd->send_egress_err_status_cnt[11];
3532}
3533
3534static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3535 void *context, int vl, int mode,
3536 u64 data)
3537{
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540 return dd->send_egress_err_status_cnt[10];
3541}
3542
3543static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3544 void *context, int vl, int mode,
3545 u64 data)
3546{
3547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549 return dd->send_egress_err_status_cnt[9];
3550}
3551
3552static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3553 const struct cntr_entry *entry,
3554 void *context, int vl, int mode, u64 data)
3555{
3556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558 return dd->send_egress_err_status_cnt[8];
3559}
3560
3561static u64 access_tx_pio_launch_intf_parity_err_cnt(
3562 const struct cntr_entry *entry,
3563 void *context, int vl, int mode, u64 data)
3564{
3565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567 return dd->send_egress_err_status_cnt[7];
3568}
3569
3570static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3571 void *context, int vl, int mode,
3572 u64 data)
3573{
3574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576 return dd->send_egress_err_status_cnt[6];
3577}
3578
3579static u64 access_tx_incorrect_link_state_err_cnt(
3580 const struct cntr_entry *entry,
3581 void *context, int vl, int mode, u64 data)
3582{
3583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585 return dd->send_egress_err_status_cnt[5];
3586}
3587
3588static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3589 void *context, int vl, int mode,
3590 u64 data)
3591{
3592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594 return dd->send_egress_err_status_cnt[4];
3595}
3596
3597static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3598 const struct cntr_entry *entry,
3599 void *context, int vl, int mode, u64 data)
3600{
3601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603 return dd->send_egress_err_status_cnt[3];
3604}
3605
3606static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3607 void *context, int vl, int mode,
3608 u64 data)
3609{
3610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612 return dd->send_egress_err_status_cnt[2];
3613}
3614
3615static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3616 const struct cntr_entry *entry,
3617 void *context, int vl, int mode, u64 data)
3618{
3619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621 return dd->send_egress_err_status_cnt[1];
3622}
3623
3624static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3625 const struct cntr_entry *entry,
3626 void *context, int vl, int mode, u64 data)
3627{
3628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630 return dd->send_egress_err_status_cnt[0];
3631}
3632
3633/*
3634 * Software counters corresponding to each of the
3635 * error status bits within SendErrStatus
3636 */
3637static u64 access_send_csr_write_bad_addr_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_err_status_cnt[2];
3644}
3645
3646static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3647 void *context, int vl,
3648 int mode, u64 data)
3649{
3650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652 return dd->send_err_status_cnt[1];
3653}
3654
3655static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3656 void *context, int vl, int mode,
3657 u64 data)
3658{
3659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661 return dd->send_err_status_cnt[0];
3662}
3663
3664/*
3665 * Software counters corresponding to each of the
3666 * error status bits within SendCtxtErrStatus
3667 */
3668static u64 access_pio_write_out_of_bounds_err_cnt(
3669 const struct cntr_entry *entry,
3670 void *context, int vl, int mode, u64 data)
3671{
3672 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3673
3674 return dd->sw_ctxt_err_status_cnt[4];
3675}
3676
3677static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3678 void *context, int vl, int mode,
3679 u64 data)
3680{
3681 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3682
3683 return dd->sw_ctxt_err_status_cnt[3];
3684}
3685
3686static u64 access_pio_write_crosses_boundary_err_cnt(
3687 const struct cntr_entry *entry,
3688 void *context, int vl, int mode, u64 data)
3689{
3690 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692 return dd->sw_ctxt_err_status_cnt[2];
3693}
3694
3695static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3696 void *context, int vl,
3697 int mode, u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->sw_ctxt_err_status_cnt[1];
3702}
3703
3704static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3705 void *context, int vl, int mode,
3706 u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->sw_ctxt_err_status_cnt[0];
3711}
3712
3713/*
3714 * Software counters corresponding to each of the
3715 * error status bits within SendDmaEngErrStatus
3716 */
3717static u64 access_sdma_header_request_fifo_cor_err_cnt(
3718 const struct cntr_entry *entry,
3719 void *context, int vl, int mode, u64 data)
3720{
3721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723 return dd->sw_send_dma_eng_err_status_cnt[23];
3724}
3725
3726static u64 access_sdma_header_storage_cor_err_cnt(
3727 const struct cntr_entry *entry,
3728 void *context, int vl, int mode, u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->sw_send_dma_eng_err_status_cnt[22];
3733}
3734
3735static u64 access_sdma_packet_tracking_cor_err_cnt(
3736 const struct cntr_entry *entry,
3737 void *context, int vl, int mode, u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->sw_send_dma_eng_err_status_cnt[21];
3742}
3743
3744static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3745 void *context, int vl, int mode,
3746 u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_send_dma_eng_err_status_cnt[20];
3751}
3752
3753static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3754 void *context, int vl, int mode,
3755 u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_send_dma_eng_err_status_cnt[19];
3760}
3761
3762static u64 access_sdma_header_request_fifo_unc_err_cnt(
3763 const struct cntr_entry *entry,
3764 void *context, int vl, int mode, u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_send_dma_eng_err_status_cnt[18];
3769}
3770
3771static u64 access_sdma_header_storage_unc_err_cnt(
3772 const struct cntr_entry *entry,
3773 void *context, int vl, int mode, u64 data)
3774{
3775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777 return dd->sw_send_dma_eng_err_status_cnt[17];
3778}
3779
3780static u64 access_sdma_packet_tracking_unc_err_cnt(
3781 const struct cntr_entry *entry,
3782 void *context, int vl, int mode, u64 data)
3783{
3784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786 return dd->sw_send_dma_eng_err_status_cnt[16];
3787}
3788
3789static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3790 void *context, int vl, int mode,
3791 u64 data)
3792{
3793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795 return dd->sw_send_dma_eng_err_status_cnt[15];
3796}
3797
3798static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3799 void *context, int vl, int mode,
3800 u64 data)
3801{
3802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804 return dd->sw_send_dma_eng_err_status_cnt[14];
3805}
3806
3807static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3808 void *context, int vl, int mode,
3809 u64 data)
3810{
3811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813 return dd->sw_send_dma_eng_err_status_cnt[13];
3814}
3815
3816static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3817 void *context, int vl, int mode,
3818 u64 data)
3819{
3820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822 return dd->sw_send_dma_eng_err_status_cnt[12];
3823}
3824
3825static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3826 void *context, int vl, int mode,
3827 u64 data)
3828{
3829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831 return dd->sw_send_dma_eng_err_status_cnt[11];
3832}
3833
3834static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3835 void *context, int vl, int mode,
3836 u64 data)
3837{
3838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840 return dd->sw_send_dma_eng_err_status_cnt[10];
3841}
3842
3843static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3844 void *context, int vl, int mode,
3845 u64 data)
3846{
3847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849 return dd->sw_send_dma_eng_err_status_cnt[9];
3850}
3851
3852static u64 access_sdma_packet_desc_overflow_err_cnt(
3853 const struct cntr_entry *entry,
3854 void *context, int vl, int mode, u64 data)
3855{
3856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858 return dd->sw_send_dma_eng_err_status_cnt[8];
3859}
3860
3861static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3862 void *context, int vl,
3863 int mode, u64 data)
3864{
3865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867 return dd->sw_send_dma_eng_err_status_cnt[7];
3868}
3869
3870static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3871 void *context, int vl, int mode, u64 data)
3872{
3873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3874
3875 return dd->sw_send_dma_eng_err_status_cnt[6];
3876}
3877
3878static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3879 void *context, int vl, int mode,
3880 u64 data)
3881{
3882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3883
3884 return dd->sw_send_dma_eng_err_status_cnt[5];
3885}
3886
3887static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3888 void *context, int vl, int mode,
3889 u64 data)
3890{
3891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893 return dd->sw_send_dma_eng_err_status_cnt[4];
3894}
3895
3896static u64 access_sdma_tail_out_of_bounds_err_cnt(
3897 const struct cntr_entry *entry,
3898 void *context, int vl, int mode, u64 data)
3899{
3900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902 return dd->sw_send_dma_eng_err_status_cnt[3];
3903}
3904
3905static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3906 void *context, int vl, int mode,
3907 u64 data)
3908{
3909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911 return dd->sw_send_dma_eng_err_status_cnt[2];
3912}
3913
3914static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3915 void *context, int vl, int mode,
3916 u64 data)
3917{
3918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920 return dd->sw_send_dma_eng_err_status_cnt[1];
3921}
3922
3923static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3924 void *context, int vl, int mode,
3925 u64 data)
3926{
3927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929 return dd->sw_send_dma_eng_err_status_cnt[0];
3930}
3931
3932#define def_access_sw_cpu(cntr) \
3933static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3934 void *context, int vl, int mode, u64 data) \
3935{ \
3936 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3937	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3938 ppd->ibport_data.rvp.cntr, vl, \
3939			      mode, data); \
3940}
3941
3942def_access_sw_cpu(rc_acks);
3943def_access_sw_cpu(rc_qacks);
3944def_access_sw_cpu(rc_delayed_comp);
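/*
 * For reference, def_access_sw_cpu(rc_acks) above expands to roughly the
 * following accessor (illustrative expansion derived from the macro, not
 * additional code):
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */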
3945
3946#define def_access_ibp_counter(cntr) \
3947static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3948 void *context, int vl, int mode, u64 data) \
3949{ \
3950 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3951 \
3952 if (vl != CNTR_INVALID_VL) \
3953 return 0; \
3954 \
3955	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
3956			     mode, data); \
3957}
3958
3959def_access_ibp_counter(loop_pkts);
3960def_access_ibp_counter(rc_resends);
3961def_access_ibp_counter(rnr_naks);
3962def_access_ibp_counter(other_naks);
3963def_access_ibp_counter(rc_timeouts);
3964def_access_ibp_counter(pkt_drops);
3965def_access_ibp_counter(dmawait);
3966def_access_ibp_counter(rc_seqnak);
3967def_access_ibp_counter(rc_dupreq);
3968def_access_ibp_counter(rdma_seq);
3969def_access_ibp_counter(unaligned);
3970def_access_ibp_counter(seq_naks);
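/*
 * Likewise, def_access_ibp_counter(loop_pkts) expands to roughly the
 * following accessor (illustrative expansion only): these per-port
 * software counters go through read_write_sw() and are not broken out
 * per VL.
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */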
3971
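/*
 * Device counter table, indexed by the C_* device counter enum.  Each
 * entry is built with CNTR_ELEM() or one of its wrappers (for example
 * RXE32_DEV_CNTR_ELEM, CCE_INT_DEV_CNTR_ELEM, DC_PERF_CNTR) and ties a
 * counter name, a CSR (0 for software-only counters), and flag bits
 * (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, ...) to the access callback used
 * to retrieve the count.
 */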
3972static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3973[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3974[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3975 CNTR_NORMAL),
3976[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3977 CNTR_NORMAL),
3978[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3979 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3980 CNTR_NORMAL),
3981[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3982 CNTR_NORMAL),
3983[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3984 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3985[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3986 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3987[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
3988 CNTR_NORMAL),
3989[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
3990 CNTR_NORMAL),
3991[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
3992 CNTR_NORMAL),
3993[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
3994 CNTR_NORMAL),
3995[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
3996 CNTR_NORMAL),
3997[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
3998 CNTR_NORMAL),
3999[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4000 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4001[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4002 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4003[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4004 CNTR_SYNTH),
4005[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4006[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4007 CNTR_SYNTH),
4008[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4009 CNTR_SYNTH),
4010[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4011 CNTR_SYNTH),
4012[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4013 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4014[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4015 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4016 CNTR_SYNTH),
4017[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4018 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4019[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4020 CNTR_SYNTH),
4021[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4022 CNTR_SYNTH),
4023[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4024 CNTR_SYNTH),
4025[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4026 CNTR_SYNTH),
4027[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4028 CNTR_SYNTH),
4029[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4030 CNTR_SYNTH),
4031[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4032 CNTR_SYNTH),
4033[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4034 CNTR_SYNTH | CNTR_VL),
4035[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4036 CNTR_SYNTH | CNTR_VL),
4037[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4038[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4039 CNTR_SYNTH | CNTR_VL),
4040[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4041[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4042 CNTR_SYNTH | CNTR_VL),
4043[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4044 CNTR_SYNTH),
4045[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4046 CNTR_SYNTH | CNTR_VL),
4047[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4048 CNTR_SYNTH),
4049[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4050 CNTR_SYNTH | CNTR_VL),
4051[C_DC_TOTAL_CRC] =
4052 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4053 CNTR_SYNTH),
4054[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4055 CNTR_SYNTH),
4056[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4057 CNTR_SYNTH),
4058[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4059 CNTR_SYNTH),
4060[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4061 CNTR_SYNTH),
4062[C_DC_CRC_MULT_LN] =
4063 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4064 CNTR_SYNTH),
4065[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4066 CNTR_SYNTH),
4067[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4068 CNTR_SYNTH),
4069[C_DC_SEQ_CRC_CNT] =
4070 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4071 CNTR_SYNTH),
4072[C_DC_ESC0_ONLY_CNT] =
4073 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4074 CNTR_SYNTH),
4075[C_DC_ESC0_PLUS1_CNT] =
4076 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4077 CNTR_SYNTH),
4078[C_DC_ESC0_PLUS2_CNT] =
4079 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4080 CNTR_SYNTH),
4081[C_DC_REINIT_FROM_PEER_CNT] =
4082 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4083 CNTR_SYNTH),
4084[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4085 CNTR_SYNTH),
4086[C_DC_MISC_FLG_CNT] =
4087 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4088 CNTR_SYNTH),
4089[C_DC_PRF_GOOD_LTP_CNT] =
4090 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4091[C_DC_PRF_ACCEPTED_LTP_CNT] =
4092 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4093 CNTR_SYNTH),
4094[C_DC_PRF_RX_FLIT_CNT] =
4095 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4096[C_DC_PRF_TX_FLIT_CNT] =
4097 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4098[C_DC_PRF_CLK_CNTR] =
4099 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4100[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4101 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4102[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4103 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4104 CNTR_SYNTH),
4105[C_DC_PG_STS_TX_SBE_CNT] =
4106 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4107[C_DC_PG_STS_TX_MBE_CNT] =
4108 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4109 CNTR_SYNTH),
4110[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4111 access_sw_cpu_intr),
4112[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4113 access_sw_cpu_rcv_limit),
4114[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4115 access_sw_vtx_wait),
4116[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4117 access_sw_pio_wait),
4118[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4119 access_sw_kmem_wait),
4120[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4121 access_sw_send_schedule),
4122[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4123 SEND_DMA_DESC_FETCHED_CNT, 0,
4124 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4125 dev_access_u32_csr),
4126[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4127 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4128 access_sde_int_cnt),
4129[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4130 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4131 access_sde_err_cnt),
4132[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4133 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4134 access_sde_idle_int_cnt),
4135[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4136 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4137 access_sde_progress_int_cnt),
4138/* MISC_ERR_STATUS */
4139[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4140 CNTR_NORMAL,
4141 access_misc_pll_lock_fail_err_cnt),
4142[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4143 CNTR_NORMAL,
4144 access_misc_mbist_fail_err_cnt),
4145[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4146 CNTR_NORMAL,
4147 access_misc_invalid_eep_cmd_err_cnt),
4148[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4149 CNTR_NORMAL,
4150 access_misc_efuse_done_parity_err_cnt),
4151[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4152 CNTR_NORMAL,
4153 access_misc_efuse_write_err_cnt),
4154[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4155 0, CNTR_NORMAL,
4156 access_misc_efuse_read_bad_addr_err_cnt),
4157[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4158 CNTR_NORMAL,
4159 access_misc_efuse_csr_parity_err_cnt),
4160[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4161 CNTR_NORMAL,
4162 access_misc_fw_auth_failed_err_cnt),
4163[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_key_mismatch_err_cnt),
4166[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4167 CNTR_NORMAL,
4168 access_misc_sbus_write_failed_err_cnt),
4169[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_csr_write_bad_addr_err_cnt),
4172[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_csr_read_bad_addr_err_cnt),
4175[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4176 CNTR_NORMAL,
4177 access_misc_csr_parity_err_cnt),
4178/* CceErrStatus */
4179[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4180 CNTR_NORMAL,
4181 access_sw_cce_err_status_aggregated_cnt),
4182[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4183 CNTR_NORMAL,
4184 access_cce_msix_csr_parity_err_cnt),
4185[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4186 CNTR_NORMAL,
4187 access_cce_int_map_unc_err_cnt),
4188[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4189 CNTR_NORMAL,
4190 access_cce_int_map_cor_err_cnt),
4191[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4192 CNTR_NORMAL,
4193 access_cce_msix_table_unc_err_cnt),
4194[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4195 CNTR_NORMAL,
4196 access_cce_msix_table_cor_err_cnt),
4197[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4198 0, CNTR_NORMAL,
4199 access_cce_rxdma_conv_fifo_parity_err_cnt),
4200[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4201 0, CNTR_NORMAL,
4202 access_cce_rcpl_async_fifo_parity_err_cnt),
4203[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4204 CNTR_NORMAL,
4205 access_cce_seg_write_bad_addr_err_cnt),
4206[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_seg_read_bad_addr_err_cnt),
4209[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4210 CNTR_NORMAL,
4211 access_la_triggered_cnt),
4212[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4213 CNTR_NORMAL,
4214 access_cce_trgt_cpl_timeout_err_cnt),
4215[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_pcic_receive_parity_err_cnt),
4218[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4219 CNTR_NORMAL,
4220 access_pcic_transmit_back_parity_err_cnt),
4221[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4222 0, CNTR_NORMAL,
4223 access_pcic_transmit_front_parity_err_cnt),
4224[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4225 CNTR_NORMAL,
4226 access_pcic_cpl_dat_q_unc_err_cnt),
4227[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_pcic_cpl_hd_q_unc_err_cnt),
4230[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4231 CNTR_NORMAL,
4232 access_pcic_post_dat_q_unc_err_cnt),
4233[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4234 CNTR_NORMAL,
4235 access_pcic_post_hd_q_unc_err_cnt),
4236[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_pcic_retry_sot_mem_unc_err_cnt),
4239[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_retry_mem_unc_err),
4242[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_pcic_n_post_dat_q_parity_err_cnt),
4245[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_pcic_n_post_h_q_parity_err_cnt),
4248[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_cpl_dat_q_cor_err_cnt),
4251[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_cpl_hd_q_cor_err_cnt),
4254[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_post_dat_q_cor_err_cnt),
4257[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_post_hd_q_cor_err_cnt),
4260[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_retry_sot_mem_cor_err_cnt),
4263[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_retry_mem_cor_err_cnt),
4266[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4267 "CceCli1AsyncFifoDbgParityError", 0, 0,
4268 CNTR_NORMAL,
4269 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4270[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4271 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4272 CNTR_NORMAL,
4273 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4274 ),
4275[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4276 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4279[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4280 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4283[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4284 0, CNTR_NORMAL,
4285 access_cce_cli2_async_fifo_parity_err_cnt),
4286[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_csr_cfg_bus_parity_err_cnt),
4289[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4290 0, CNTR_NORMAL,
4291 access_cce_cli0_async_fifo_parity_err_cnt),
4292[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4293 CNTR_NORMAL,
4294 access_cce_rspd_data_parity_err_cnt),
4295[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_trgt_access_err_cnt),
4298[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4299 0, CNTR_NORMAL,
4300 access_cce_trgt_async_fifo_parity_err_cnt),
4301[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_cce_csr_write_bad_addr_err_cnt),
4304[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_cce_csr_read_bad_addr_err_cnt),
4307[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_ccs_csr_parity_err_cnt),
4310
4311/* RcvErrStatus */
4312[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_rx_csr_parity_err_cnt),
4315[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_rx_csr_write_bad_addr_err_cnt),
4318[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4319 CNTR_NORMAL,
4320 access_rx_csr_read_bad_addr_err_cnt),
4321[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4322 CNTR_NORMAL,
4323 access_rx_dma_csr_unc_err_cnt),
4324[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_rx_dma_dq_fsm_encoding_err_cnt),
4327[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_rx_dma_eq_fsm_encoding_err_cnt),
4330[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_rx_dma_csr_parity_err_cnt),
4333[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_rbuf_data_cor_err_cnt),
4336[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_rbuf_data_unc_err_cnt),
4339[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_dma_data_fifo_rd_cor_err_cnt),
4342[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_dma_data_fifo_rd_unc_err_cnt),
4345[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4348[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4351[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_rbuf_desc_part2_cor_err_cnt),
4354[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_rbuf_desc_part2_unc_err_cnt),
4357[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_rbuf_desc_part1_cor_err_cnt),
4360[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_rbuf_desc_part1_unc_err_cnt),
4363[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_hq_intr_fsm_err_cnt),
4366[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_hq_intr_csr_parity_err_cnt),
4369[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_lookup_csr_parity_err_cnt),
4372[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_lookup_rcv_array_cor_err_cnt),
4375[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_lookup_rcv_array_unc_err_cnt),
4378[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4379 0, CNTR_NORMAL,
4380 access_rx_lookup_des_part2_parity_err_cnt),
4381[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4382 0, CNTR_NORMAL,
4383 access_rx_lookup_des_part1_unc_cor_err_cnt),
4384[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_lookup_des_part1_unc_err_cnt),
4387[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_rbuf_next_free_buf_cor_err_cnt),
4390[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_rx_rbuf_next_free_buf_unc_err_cnt),
4393[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4394 "RxRbufFlInitWrAddrParityErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4397[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4398 0, CNTR_NORMAL,
4399 access_rx_rbuf_fl_initdone_parity_err_cnt),
4400[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4401 0, CNTR_NORMAL,
4402 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4403[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4406[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_empty_err_cnt),
4409[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_full_err_cnt),
4412[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4413 CNTR_NORMAL,
4414 access_rbuf_bad_lookup_err_cnt),
4415[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rbuf_ctx_id_parity_err_cnt),
4418[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rbuf_csr_qeopdw_parity_err_cnt),
4421[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4422 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4425[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4426 "RxRbufCsrQTlPtrParityErr", 0, 0,
4427 CNTR_NORMAL,
4428 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4429[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4430 0, CNTR_NORMAL,
4431 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4432[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4433 0, CNTR_NORMAL,
4434 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4435[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4436 0, 0, CNTR_NORMAL,
4437 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4438[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4439 0, CNTR_NORMAL,
4440 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4441[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4442 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4445[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4446 0, CNTR_NORMAL,
4447 access_rx_rbuf_block_list_read_cor_err_cnt),
4448[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4449 0, CNTR_NORMAL,
4450 access_rx_rbuf_block_list_read_unc_err_cnt),
4451[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4452 CNTR_NORMAL,
4453 access_rx_rbuf_lookup_des_cor_err_cnt),
4454[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_rbuf_lookup_des_unc_err_cnt),
4457[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4458 "RxRbufLookupDesRegUncCorErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4461[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4464[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rx_rbuf_free_list_cor_err_cnt),
4467[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rx_rbuf_free_list_unc_err_cnt),
4470[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rcv_fsm_encoding_err_cnt),
4473[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_dma_flag_cor_err_cnt),
4476[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_rx_dma_flag_unc_err_cnt),
4479[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_dc_sop_eop_parity_err_cnt),
4482[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rcv_csr_parity_err_cnt),
4485[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_rcv_qp_map_table_cor_err_cnt),
4488[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_rcv_qp_map_table_unc_err_cnt),
4491[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_rcv_data_cor_err_cnt),
4494[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_rcv_data_unc_err_cnt),
4497[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_rcv_hdr_cor_err_cnt),
4500[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_rcv_hdr_unc_err_cnt),
4503[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_dc_intf_parity_err_cnt),
4506[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_dma_csr_cor_err_cnt),
4509/* SendPioErrStatus */
4510[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_pio_pec_sop_head_parity_err_cnt),
4513[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_pio_pcc_sop_head_parity_err_cnt),
4516[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4517 0, 0, CNTR_NORMAL,
4518 access_pio_last_returned_cnt_parity_err_cnt),
4519[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4520 0, CNTR_NORMAL,
4521 access_pio_current_free_cnt_parity_err_cnt),
4522[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4523 CNTR_NORMAL,
4524 access_pio_reserved_31_err_cnt),
4525[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4526 CNTR_NORMAL,
4527 access_pio_reserved_30_err_cnt),
4528[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4529 CNTR_NORMAL,
4530 access_pio_ppmc_sop_len_err_cnt),
4531[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_pio_ppmc_bqc_mem_parity_err_cnt),
4534[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_vl_fifo_parity_err_cnt),
4537[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_pio_vlf_sop_parity_err_cnt),
4540[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_pio_vlf_v1_len_parity_err_cnt),
4543[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_pio_block_qw_count_parity_err_cnt),
4546[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_write_qw_valid_parity_err_cnt),
4549[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_state_machine_err_cnt),
4552[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_write_data_parity_err_cnt),
4555[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_host_addr_mem_cor_err_cnt),
4558[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_host_addr_mem_unc_err_cnt),
4561[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4564[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_init_sm_in_err_cnt),
4567[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_ppmc_pbl_fifo_err_cnt),
4570[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4571 0, CNTR_NORMAL,
4572 access_pio_credit_ret_fifo_parity_err_cnt),
4573[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_v1_len_mem_bank1_cor_err_cnt),
4576[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_v1_len_mem_bank0_cor_err_cnt),
4579[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_v1_len_mem_bank1_unc_err_cnt),
4582[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4583 CNTR_NORMAL,
4584 access_pio_v1_len_mem_bank0_unc_err_cnt),
4585[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_sm_pkt_reset_parity_err_cnt),
4588[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_pkt_evict_fifo_parity_err_cnt),
4591[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4592 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4595[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_sbrdctl_crrel_parity_err_cnt),
4598[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_pec_fifo_parity_err_cnt),
4601[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_pcc_fifo_parity_err_cnt),
4604[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sb_mem_fifo1_err_cnt),
4607[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_sb_mem_fifo0_err_cnt),
4610[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_pio_csr_parity_err_cnt),
4613[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_write_addr_parity_err_cnt),
4616[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_write_bad_ctxt_err_cnt),
4619/* SendDmaErrStatus */
4620[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4621 0, CNTR_NORMAL,
4622 access_sdma_pcie_req_tracking_cor_err_cnt),
4623[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4624 0, CNTR_NORMAL,
4625 access_sdma_pcie_req_tracking_unc_err_cnt),
4626[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_sdma_csr_parity_err_cnt),
4629[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_sdma_rpy_tag_err_cnt),
4632/* SendEgressErrStatus */
4633[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4634 CNTR_NORMAL,
4635 access_tx_read_pio_memory_csr_unc_err_cnt),
4636[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4637 0, CNTR_NORMAL,
4638 access_tx_read_sdma_memory_csr_err_cnt),
4639[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4640 CNTR_NORMAL,
4641 access_tx_egress_fifo_cor_err_cnt),
4642[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_tx_read_pio_memory_cor_err_cnt),
4645[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_tx_read_sdma_memory_cor_err_cnt),
4648[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_tx_sb_hdr_cor_err_cnt),
4651[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4652 CNTR_NORMAL,
4653 access_tx_credit_overrun_err_cnt),
4654[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4655 CNTR_NORMAL,
4656 access_tx_launch_fifo8_cor_err_cnt),
4657[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_tx_launch_fifo7_cor_err_cnt),
4660[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_tx_launch_fifo6_cor_err_cnt),
4663[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_launch_fifo5_cor_err_cnt),
4666[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_launch_fifo4_cor_err_cnt),
4669[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_launch_fifo3_cor_err_cnt),
4672[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_launch_fifo2_cor_err_cnt),
4675[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_launch_fifo1_cor_err_cnt),
4678[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo0_cor_err_cnt),
4681[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_credit_return_vl_err_cnt),
4684[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_hcrc_insertion_err_cnt),
4687[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_egress_fifo_unc_err_cnt),
4690[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_read_pio_memory_unc_err_cnt),
4693[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_read_sdma_memory_unc_err_cnt),
4696[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_sb_hdr_unc_err_cnt),
4699[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4700 CNTR_NORMAL,
4701			access_tx_credit_return_parity_err_cnt),
4702[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4703 0, 0, CNTR_NORMAL,
4704 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4705[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4706 0, 0, CNTR_NORMAL,
4707 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4708[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4709 0, 0, CNTR_NORMAL,
4710 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4711[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4712 0, 0, CNTR_NORMAL,
4713 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4714[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4715 0, 0, CNTR_NORMAL,
4716 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4717[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4718 0, 0, CNTR_NORMAL,
4719 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4720[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4721 0, 0, CNTR_NORMAL,
4722 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4723[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4726[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4729[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_sdma15_disallowed_packet_err_cnt),
4732[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_sdma14_disallowed_packet_err_cnt),
4735[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_sdma13_disallowed_packet_err_cnt),
4738[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_sdma12_disallowed_packet_err_cnt),
4741[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_sdma11_disallowed_packet_err_cnt),
4744[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_sdma10_disallowed_packet_err_cnt),
4747[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_sdma9_disallowed_packet_err_cnt),
4750[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_sdma8_disallowed_packet_err_cnt),
4753[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma7_disallowed_packet_err_cnt),
4756[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma6_disallowed_packet_err_cnt),
4759[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma5_disallowed_packet_err_cnt),
4762[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma4_disallowed_packet_err_cnt),
4765[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma3_disallowed_packet_err_cnt),
4768[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma2_disallowed_packet_err_cnt),
4771[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma1_disallowed_packet_err_cnt),
4774[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma0_disallowed_packet_err_cnt),
4777[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4778 CNTR_NORMAL,
4779 access_tx_config_parity_err_cnt),
4780[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4781 CNTR_NORMAL,
4782 access_tx_sbrd_ctl_csr_parity_err_cnt),
4783[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4784 CNTR_NORMAL,
4785 access_tx_launch_csr_parity_err_cnt),
4786[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4787 CNTR_NORMAL,
4788 access_tx_illegal_vl_err_cnt),
4789[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4790 "TxSbrdCtlStateMachineParityErr", 0, 0,
4791 CNTR_NORMAL,
4792 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4793[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4794 CNTR_NORMAL,
4795 access_egress_reserved_10_err_cnt),
4796[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4797 CNTR_NORMAL,
4798 access_egress_reserved_9_err_cnt),
4799[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4800 0, 0, CNTR_NORMAL,
4801 access_tx_sdma_launch_intf_parity_err_cnt),
4802[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_pio_launch_intf_parity_err_cnt),
4805[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4806 CNTR_NORMAL,
4807 access_egress_reserved_6_err_cnt),
4808[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4809 CNTR_NORMAL,
4810 access_tx_incorrect_link_state_err_cnt),
4811[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4812 CNTR_NORMAL,
4813 access_tx_linkdown_err_cnt),
4814[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4815 "EgressFifoUnderrunOrParityErr", 0, 0,
4816 CNTR_NORMAL,
4817 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4818[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4819 CNTR_NORMAL,
4820 access_egress_reserved_2_err_cnt),
4821[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4822 CNTR_NORMAL,
4823 access_tx_pkt_integrity_mem_unc_err_cnt),
4824[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4825 CNTR_NORMAL,
4826 access_tx_pkt_integrity_mem_cor_err_cnt),
4827/* SendErrStatus */
4828[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4829 CNTR_NORMAL,
4830 access_send_csr_write_bad_addr_err_cnt),
4831[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4832 CNTR_NORMAL,
4833 access_send_csr_read_bad_addr_err_cnt),
4834[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4835 CNTR_NORMAL,
4836 access_send_csr_parity_cnt),
4837/* SendCtxtErrStatus */
4838[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4839 CNTR_NORMAL,
4840 access_pio_write_out_of_bounds_err_cnt),
4841[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4842 CNTR_NORMAL,
4843 access_pio_write_overflow_err_cnt),
4844[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4845 0, 0, CNTR_NORMAL,
4846 access_pio_write_crosses_boundary_err_cnt),
4847[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4848 CNTR_NORMAL,
4849 access_pio_disallowed_packet_err_cnt),
4850[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_pio_inconsistent_sop_err_cnt),
4853/* SendDmaEngErrStatus */
4854[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4855 0, 0, CNTR_NORMAL,
4856 access_sdma_header_request_fifo_cor_err_cnt),
4857[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4858 CNTR_NORMAL,
4859 access_sdma_header_storage_cor_err_cnt),
4860[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_sdma_packet_tracking_cor_err_cnt),
4863[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4864 CNTR_NORMAL,
4865 access_sdma_assembly_cor_err_cnt),
4866[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_sdma_desc_table_cor_err_cnt),
4869[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4870 0, 0, CNTR_NORMAL,
4871 access_sdma_header_request_fifo_unc_err_cnt),
4872[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4873 CNTR_NORMAL,
4874 access_sdma_header_storage_unc_err_cnt),
4875[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4876 CNTR_NORMAL,
4877 access_sdma_packet_tracking_unc_err_cnt),
4878[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_sdma_assembly_unc_err_cnt),
4881[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_sdma_desc_table_unc_err_cnt),
4884[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_timeout_err_cnt),
4887[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_header_length_err_cnt),
4890[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4891 CNTR_NORMAL,
4892 access_sdma_header_address_err_cnt),
4893[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_sdma_header_select_err_cnt),
4896[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_reserved_9_err_cnt),
4899[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_packet_desc_overflow_err_cnt),
4902[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_length_mismatch_err_cnt),
4905[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_halt_err_cnt),
4908[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_mem_read_err_cnt),
4911[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_first_desc_err_cnt),
4914[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_tail_out_of_bounds_err_cnt),
4917[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_too_long_err_cnt),
4920[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_gen_mismatch_err_cnt),
4923[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004926};
4927
4928static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4929[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4930 CNTR_NORMAL),
4931[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4932 CNTR_NORMAL),
4933[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4934 CNTR_NORMAL),
4935[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4936 CNTR_NORMAL),
4937[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4938 CNTR_NORMAL),
4939[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4940 CNTR_NORMAL),
4941[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4942 CNTR_NORMAL),
4943[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4944[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4945[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4946[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4947 CNTR_SYNTH | CNTR_VL),
4948[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4949 CNTR_SYNTH | CNTR_VL),
4950[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4951 CNTR_SYNTH | CNTR_VL),
4952[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4953[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4954[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4955 access_sw_link_dn_cnt),
4956[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4957 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05004958[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4959 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004960[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4961 access_sw_xmit_discards),
4962[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4963 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4964 access_sw_xmit_discards),
4965[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4966 access_xmit_constraint_errs),
4967[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4968 access_rcv_constraint_errs),
4969[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4970[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4971[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4972[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4973[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4974[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4975[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4976[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4977[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4978[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4979[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4980[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4981[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4982 access_sw_cpu_rc_acks),
4983[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4984 access_sw_cpu_rc_qacks),
4985[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4986 access_sw_cpu_rc_delayed_comp),
4987[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
4988[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
4989[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
4990[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
4991[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
4992[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
4993[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
4994[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
4995[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
4996[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
4997[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
4998[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
4999[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5000[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5001[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5002[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5003[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5004[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5005[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5006[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5007[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5008[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5009[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5010[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5011[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5012[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5013[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5014[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5015[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5016[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5017[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5018[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5019[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5020[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5021[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5022[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5023[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5024[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5025[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5026[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5027[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5028[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5029[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5030[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5031[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5032[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5033[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5034[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5035[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5036[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5037[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5038[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5039[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5040[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5041[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5042[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5043[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5044[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5045[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5046[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5047[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5048[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5049[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5050[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5051[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5052[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5053[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5054[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5055[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5056[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5057[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5058[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5059[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5060[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5061[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5062[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5063[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5064[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5065[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5066[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5067};
5068
5069/* ======================================================================== */
5070
Mike Marciniszyn77241052015-07-30 15:17:43 -04005071/* return true if this is chip revision a */
5072int is_ax(struct hfi1_devdata *dd)
5073{
5074 u8 chip_rev_minor =
5075 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5076 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5077 return (chip_rev_minor & 0xf0) == 0;
5078}
5079
 5080/* return true if this is chip revision b */
5081int is_bx(struct hfi1_devdata *dd)
5082{
5083 u8 chip_rev_minor =
5084 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5085 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005086 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005087}
5088
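/*
 * Worked example for the two revision checks above (illustrative
 * values, not read from hardware): only the high nibble of the minor
 * revision field decides the answer.
 *
 *	chip_rev_minor = 0x01  ->  (0x01 & 0xf0) == 0x00  ->  is_ax() returns 1
 *	chip_rev_minor = 0x12  ->  (0x12 & 0xf0) == 0x10  ->  is_bx() returns 1
 */
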
5089/*
 5090 * Append string s to buffer buf. Arguments curp and lenp point to the current
5091 * position and remaining length, respectively.
5092 *
5093 * return 0 on success, 1 on out of room
5094 */
5095static int append_str(char *buf, char **curp, int *lenp, const char *s)
5096{
5097 char *p = *curp;
5098 int len = *lenp;
5099 int result = 0; /* success */
5100 char c;
5101
 5102	/* add a comma, if not first in the buffer */
5103 if (p != buf) {
5104 if (len == 0) {
5105 result = 1; /* out of room */
5106 goto done;
5107 }
5108 *p++ = ',';
5109 len--;
5110 }
5111
5112 /* copy the string */
5113 while ((c = *s++) != 0) {
5114 if (len == 0) {
5115 result = 1; /* out of room */
5116 goto done;
5117 }
5118 *p++ = c;
5119 len--;
5120 }
5121
5122done:
5123 /* write return values */
5124 *curp = p;
5125 *lenp = len;
5126
5127 return result;
5128}
5129
5130/*
5131 * Using the given flag table, print a comma separated string into
5132 * the buffer. End in '*' if the buffer is too short.
5133 */
5134static char *flag_string(char *buf, int buf_len, u64 flags,
5135 struct flag_table *table, int table_size)
5136{
5137 char extra[32];
5138 char *p = buf;
5139 int len = buf_len;
5140 int no_room = 0;
5141 int i;
5142
 5143	/* make sure there are at least 2 bytes so we can form "*" */
5144 if (len < 2)
5145 return "";
5146
5147 len--; /* leave room for a nul */
5148 for (i = 0; i < table_size; i++) {
5149 if (flags & table[i].flag) {
5150 no_room = append_str(buf, &p, &len, table[i].str);
5151 if (no_room)
5152 break;
5153 flags &= ~table[i].flag;
5154 }
5155 }
5156
5157 /* any undocumented bits left? */
5158 if (!no_room && flags) {
5159 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5160 no_room = append_str(buf, &p, &len, extra);
5161 }
5162
5163 /* add * if ran out of room */
5164 if (no_room) {
5165 /* may need to back up to add space for a '*' */
5166 if (len == 0)
5167 --p;
5168 *p++ = '*';
5169 }
5170
5171 /* add final nul - space already allocated above */
5172 *p = 0;
5173 return buf;
5174}
5175
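/*
 * Usage sketch for flag_string() (illustrative only; the table, flag
 * values and names below are assumptions, not driver data):
 *
 *	static struct flag_table demo_flags[] = {
 *		{ .flag = 0x1ull, .str = "LinkErr" },
 *		{ .flag = 0x2ull, .str = "FifoErr" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x7, demo_flags,
 *		    ARRAY_SIZE(demo_flags));
 *
 * buf now holds "LinkErr,FifoErr,bits 0x4": the bits named in the table
 * are printed first, any undocumented remainder is reported as a raw
 * hex value, and a trailing '*' would be appended only if the buffer
 * were too small.
 */
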
5176/* first 8 CCE error interrupt source names */
5177static const char * const cce_misc_names[] = {
5178 "CceErrInt", /* 0 */
5179 "RxeErrInt", /* 1 */
5180 "MiscErrInt", /* 2 */
5181 "Reserved3", /* 3 */
5182 "PioErrInt", /* 4 */
5183 "SDmaErrInt", /* 5 */
5184 "EgressErrInt", /* 6 */
5185 "TxeErrInt" /* 7 */
5186};
5187
5188/*
5189 * Return the miscellaneous error interrupt name.
5190 */
5191static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5192{
5193 if (source < ARRAY_SIZE(cce_misc_names))
5194 strncpy(buf, cce_misc_names[source], bsize);
5195 else
5196 snprintf(buf,
5197 bsize,
5198 "Reserved%u",
5199 source + IS_GENERAL_ERR_START);
5200
5201 return buf;
5202}
5203
5204/*
5205 * Return the SDMA engine error interrupt name.
5206 */
5207static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5208{
5209 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5210 return buf;
5211}
5212
5213/*
5214 * Return the send context error interrupt name.
5215 */
5216static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5217{
5218 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5219 return buf;
5220}
5221
5222static const char * const various_names[] = {
5223 "PbcInt",
5224 "GpioAssertInt",
5225 "Qsfp1Int",
5226 "Qsfp2Int",
5227 "TCritInt"
5228};
5229
5230/*
5231 * Return the various interrupt name.
5232 */
5233static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5234{
5235 if (source < ARRAY_SIZE(various_names))
5236 strncpy(buf, various_names[source], bsize);
5237 else
 5238		snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5239 return buf;
5240}
5241
5242/*
5243 * Return the DC interrupt name.
5244 */
5245static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5246{
5247 static const char * const dc_int_names[] = {
5248 "common",
5249 "lcb",
5250 "8051",
5251 "lbm" /* local block merge */
5252 };
5253
5254 if (source < ARRAY_SIZE(dc_int_names))
5255 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5256 else
5257 snprintf(buf, bsize, "DCInt%u", source);
5258 return buf;
5259}
5260
5261static const char * const sdma_int_names[] = {
5262 "SDmaInt",
5263 "SdmaIdleInt",
5264 "SdmaProgressInt",
5265};
5266
5267/*
5268 * Return the SDMA engine interrupt name.
5269 */
5270static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5271{
5272 /* what interrupt */
5273 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5274 /* which engine */
5275 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5276
5277 if (likely(what < 3))
5278 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5279 else
5280 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5281 return buf;
5282}
5283
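/*
 * Worked example for the source decoding above (illustrative, and it
 * assumes TXE_NUM_SDMA_ENGINES is 16): the sources are grouped as one
 * entry per engine for each of the three interrupt types.
 *
 *	source = 3   ->  what = 0, which = 3  ->  "SDmaInt3"
 *	source = 17  ->  what = 1, which = 1  ->  "SdmaIdleInt1"
 *	source = 50  ->  what = 3             ->  "Invalid SDMA interrupt 50"
 */
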
5284/*
5285 * Return the receive available interrupt name.
5286 */
5287static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5288{
5289 snprintf(buf, bsize, "RcvAvailInt%u", source);
5290 return buf;
5291}
5292
5293/*
5294 * Return the receive urgent interrupt name.
5295 */
5296static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5297{
5298 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5299 return buf;
5300}
5301
5302/*
5303 * Return the send credit interrupt name.
5304 */
5305static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5306{
5307 snprintf(buf, bsize, "SendCreditInt%u", source);
5308 return buf;
5309}
5310
5311/*
5312 * Return the reserved interrupt name.
5313 */
5314static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5317 return buf;
5318}
5319
5320static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5321{
5322 return flag_string(buf, buf_len, flags,
5323 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5324}
5325
5326static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5327{
5328 return flag_string(buf, buf_len, flags,
5329 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5330}
5331
5332static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5333{
5334 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5335 ARRAY_SIZE(misc_err_status_flags));
5336}
5337
5338static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5339{
5340 return flag_string(buf, buf_len, flags,
5341 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5342}
5343
5344static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5345{
5346 return flag_string(buf, buf_len, flags,
5347 sdma_err_status_flags,
5348 ARRAY_SIZE(sdma_err_status_flags));
5349}
5350
5351static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5352{
5353 return flag_string(buf, buf_len, flags,
5354 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5355}
5356
5357static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5358{
5359 return flag_string(buf, buf_len, flags,
5360 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5361}
5362
5363static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5364{
5365 return flag_string(buf, buf_len, flags,
5366 send_err_status_flags,
5367 ARRAY_SIZE(send_err_status_flags));
5368}
5369
5370static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5371{
5372 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005373 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005374
5375 /*
 5376	 * For most of these errors, there is nothing that can be done except
5377 * report or record it.
5378 */
5379 dd_dev_info(dd, "CCE Error: %s\n",
5380 cce_err_status_string(buf, sizeof(buf), reg));
5381
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005382 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5383 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005384 /* this error requires a manual drop into SPC freeze mode */
5385 /* then a fix up */
5386 start_freeze_handling(dd->pport, FREEZE_SELF);
5387 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005388
5389 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5390 if (reg & (1ull << i)) {
5391 incr_cntr64(&dd->cce_err_status_cnt[i]);
5392 /* maintain a counter over all cce_err_status errors */
5393 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5394 }
5395 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005396}
5397
5398/*
5399 * Check counters for receive errors that do not have an interrupt
5400 * associated with them.
5401 */
5402#define RCVERR_CHECK_TIME 10
5403static void update_rcverr_timer(unsigned long opaque)
5404{
5405 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5406 struct hfi1_pportdata *ppd = dd->pport;
5407 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5408
5409 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5410 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5411 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5412 set_link_down_reason(ppd,
5413 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5414 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5415 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5416 }
5417 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5418
5419 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5420}
5421
5422static int init_rcverr(struct hfi1_devdata *dd)
5423{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305424 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005425 /* Assume the hardware counter has been reset */
5426 dd->rcv_ovfl_cnt = 0;
5427 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5428}
5429
5430static void free_rcverr(struct hfi1_devdata *dd)
5431{
5432 if (dd->rcverr_timer.data)
5433 del_timer_sync(&dd->rcverr_timer);
5434 dd->rcverr_timer.data = 0;
5435}
5436
5437static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5438{
5439 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005440 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005441
5442 dd_dev_info(dd, "Receive Error: %s\n",
5443 rxe_err_status_string(buf, sizeof(buf), reg));
5444
5445 if (reg & ALL_RXE_FREEZE_ERR) {
5446 int flags = 0;
5447
5448 /*
5449 * Freeze mode recovery is disabled for the errors
5450 * in RXE_FREEZE_ABORT_MASK
5451 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005452 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005453 flags = FREEZE_ABORT;
5454
5455 start_freeze_handling(dd->pport, flags);
5456 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005457
5458 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5459 if (reg & (1ull << i))
5460 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5461 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005462}
5463
5464static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5465{
5466 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005467 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005468
5469 dd_dev_info(dd, "Misc Error: %s",
5470 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005471 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5472 if (reg & (1ull << i))
5473 incr_cntr64(&dd->misc_err_status_cnt[i]);
5474 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005475}
5476
5477static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5478{
5479 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005480 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005481
5482 dd_dev_info(dd, "PIO Error: %s\n",
5483 pio_err_status_string(buf, sizeof(buf), reg));
5484
5485 if (reg & ALL_PIO_FREEZE_ERR)
5486 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005487
5488 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5489 if (reg & (1ull << i))
5490 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5491 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005492}
5493
5494static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5495{
5496 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005497 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005498
5499 dd_dev_info(dd, "SDMA Error: %s\n",
5500 sdma_err_status_string(buf, sizeof(buf), reg));
5501
5502 if (reg & ALL_SDMA_FREEZE_ERR)
5503 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005504
5505 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5506 if (reg & (1ull << i))
5507 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5508 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005509}
5510
5511static void count_port_inactive(struct hfi1_devdata *dd)
5512{
5513 struct hfi1_pportdata *ppd = dd->pport;
5514
5515 if (ppd->port_xmit_discards < ~(u64)0)
5516 ppd->port_xmit_discards++;
5517}
5518
5519/*
5520 * We have had a "disallowed packet" error during egress. Determine the
 5521 * integrity check which failed, and update the relevant error counter, etc.
5522 *
5523 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5524 * bit of state per integrity check, and so we can miss the reason for an
5525 * egress error if more than one packet fails the same integrity check
5526 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5527 */
5528static void handle_send_egress_err_info(struct hfi1_devdata *dd)
5529{
5530 struct hfi1_pportdata *ppd = dd->pport;
5531 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5532 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5533 char buf[96];
5534
5535 /* clear down all observed info as quickly as possible after read */
5536 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5537
5538 dd_dev_info(dd,
5539 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5540 info, egress_err_info_string(buf, sizeof(buf), info), src);
5541
5542 /* Eventually add other counters for each bit */
5543
5544 if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
5545 if (ppd->port_xmit_discards < ~(u64)0)
5546 ppd->port_xmit_discards++;
5547 }
5548}
5549
5550/*
5551 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5552 * register. Does it represent a 'port inactive' error?
5553 */
5554static inline int port_inactive_err(u64 posn)
5555{
5556 return (posn >= SEES(TX_LINKDOWN) &&
5557 posn <= SEES(TX_INCORRECT_LINK_STATE));
5558}
5559
5560/*
5561 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5562 * register. Does it represent a 'disallowed packet' error?
5563 */
5564static inline int disallowed_pkt_err(u64 posn)
5565{
5566 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5567 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5568}
5569
5570static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5571{
5572 u64 reg_copy = reg, handled = 0;
5573 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005574 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005575
5576 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5577 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005578 if (is_ax(dd) && (reg &
Mike Marciniszyn77241052015-07-30 15:17:43 -04005579 SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
5580 && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5581 start_freeze_handling(dd->pport, 0);
5582
5583 while (reg_copy) {
5584 int posn = fls64(reg_copy);
5585 /*
5586 * fls64() returns a 1-based offset, but we generally
5587 * want 0-based offsets.
5588 */
5589 int shift = posn - 1;
5590
5591 if (port_inactive_err(shift)) {
5592 count_port_inactive(dd);
5593 handled |= (1ULL << shift);
5594 } else if (disallowed_pkt_err(shift)) {
5595 handle_send_egress_err_info(dd);
5596 handled |= (1ULL << shift);
5597 }
5598 clear_bit(shift, (unsigned long *)&reg_copy);
5599 }
5600
5601 reg &= ~handled;
5602
5603 if (reg)
5604 dd_dev_info(dd, "Egress Error: %s\n",
5605 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005606
5607 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5608 if (reg & (1ull << i))
5609 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5610 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005611}
5612
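/*
 * Worked example for the bit walk in handle_egress_err() above
 * (illustrative register value only): fls64() is 1-based, so each pass
 * converts the result to a 0-based shift, examines that bit, and then
 * clears it from the working copy.
 *
 *	reg_copy = 0x9  ->  fls64() = 4, shift = 3, bit 3 cleared -> 0x1
 *	reg_copy = 0x1  ->  fls64() = 1, shift = 0, bit 0 cleared -> 0
 *	reg_copy = 0    ->  loop exits
 *
 * Only bits recognized as "port inactive" or "disallowed packet" are
 * added to 'handled' and stripped from the value reported at the end.
 */
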
5613static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5614{
5615 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005616 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005617
5618 dd_dev_info(dd, "Send Error: %s\n",
5619 send_err_status_string(buf, sizeof(buf), reg));
5620
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005621 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5622 if (reg & (1ull << i))
5623 incr_cntr64(&dd->send_err_status_cnt[i]);
5624 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005625}
5626
5627/*
5628 * The maximum number of times the error clear down will loop before
5629 * blocking a repeating error. This value is arbitrary.
5630 */
5631#define MAX_CLEAR_COUNT 20
5632
5633/*
5634 * Clear and handle an error register. All error interrupts are funneled
5635 * through here to have a central location to correctly handle single-
5636 * or multi-shot errors.
5637 *
5638 * For non per-context registers, call this routine with a context value
5639 * of 0 so the per-context offset is zero.
5640 *
5641 * If the handler loops too many times, assume that something is wrong
5642 * and can't be fixed, so mask the error bits.
5643 */
5644static void interrupt_clear_down(struct hfi1_devdata *dd,
5645 u32 context,
5646 const struct err_reg_info *eri)
5647{
5648 u64 reg;
5649 u32 count;
5650
5651 /* read in a loop until no more errors are seen */
5652 count = 0;
5653 while (1) {
5654 reg = read_kctxt_csr(dd, context, eri->status);
5655 if (reg == 0)
5656 break;
5657 write_kctxt_csr(dd, context, eri->clear, reg);
5658 if (likely(eri->handler))
5659 eri->handler(dd, context, reg);
5660 count++;
5661 if (count > MAX_CLEAR_COUNT) {
5662 u64 mask;
5663
5664 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5665 eri->desc, reg);
5666 /*
5667 * Read-modify-write so any other masked bits
5668 * remain masked.
5669 */
5670 mask = read_kctxt_csr(dd, context, eri->mask);
5671 mask &= ~reg;
5672 write_kctxt_csr(dd, context, eri->mask, mask);
5673 break;
5674 }
5675 }
5676}
5677
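/*
 * Masking sketch for interrupt_clear_down() above (illustrative
 * register values): when a source keeps reasserting past
 * MAX_CLEAR_COUNT, the read-modify-write disables only the stuck bits
 * and leaves every other enabled source alone.
 *
 *	mask (read back) = 0xffff
 *	reg  (stuck)     = 0x0005
 *	mask & ~reg      = 0xfffa   ->  written back; bits 0 and 2 masked
 */
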
5678/*
5679 * CCE block "misc" interrupt. Source is < 16.
5680 */
5681static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5682{
5683 const struct err_reg_info *eri = &misc_errs[source];
5684
5685 if (eri->handler) {
5686 interrupt_clear_down(dd, 0, eri);
5687 } else {
5688 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5689 source);
5690 }
5691}
5692
5693static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5694{
5695 return flag_string(buf, buf_len, flags,
5696 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5697}
5698
5699/*
5700 * Send context error interrupt. Source (hw_context) is < 160.
5701 *
5702 * All send context errors cause the send context to halt. The normal
5703 * clear-down mechanism cannot be used because we cannot clear the
5704 * error bits until several other long-running items are done first.
5705 * This is OK because with the context halted, nothing else is going
5706 * to happen on it anyway.
5707 */
5708static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5709 unsigned int hw_context)
5710{
5711 struct send_context_info *sci;
5712 struct send_context *sc;
5713 char flags[96];
5714 u64 status;
5715 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005716 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005717
5718 sw_index = dd->hw_to_sw[hw_context];
5719 if (sw_index >= dd->num_send_contexts) {
5720 dd_dev_err(dd,
5721 "out of range sw index %u for send context %u\n",
5722 sw_index, hw_context);
5723 return;
5724 }
5725 sci = &dd->send_contexts[sw_index];
5726 sc = sci->sc;
5727 if (!sc) {
5728 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5729 sw_index, hw_context);
5730 return;
5731 }
5732
5733 /* tell the software that a halt has begun */
5734 sc_stop(sc, SCF_HALTED);
5735
5736 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5737
5738 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5739 send_context_err_status_string(flags, sizeof(flags), status));
5740
5741 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5742 handle_send_egress_err_info(dd);
5743
5744 /*
5745 * Automatically restart halted kernel contexts out of interrupt
5746 * context. User contexts must ask the driver to restart the context.
5747 */
5748 if (sc->type != SC_USER)
5749 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005750
5751 /*
5752 * Update the counters for the corresponding status bits.
5753 * Note that these particular counters are aggregated over all
5754 * 160 contexts.
5755 */
5756 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5757 if (status & (1ull << i))
5758 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5759 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005760}
5761
5762static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5763 unsigned int source, u64 status)
5764{
5765 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005766 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005767
5768 sde = &dd->per_sdma[source];
5769#ifdef CONFIG_SDMA_VERBOSITY
5770 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5771 slashstrip(__FILE__), __LINE__, __func__);
5772 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5773 sde->this_idx, source, (unsigned long long)status);
5774#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005775 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005776 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005777
5778 /*
5779 * Update the counters for the corresponding status bits.
5780 * Note that these particular counters are aggregated over
5781 * all 16 DMA engines.
5782 */
5783 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5784 if (status & (1ull << i))
5785 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5786 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005787}
5788
5789/*
5790 * CCE block SDMA error interrupt. Source is < 16.
5791 */
5792static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5793{
5794#ifdef CONFIG_SDMA_VERBOSITY
5795 struct sdma_engine *sde = &dd->per_sdma[source];
5796
5797 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5798 slashstrip(__FILE__), __LINE__, __func__);
5799 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5800 source);
5801 sdma_dumpstate(sde);
5802#endif
5803 interrupt_clear_down(dd, source, &sdma_eng_err);
5804}
5805
5806/*
5807 * CCE block "various" interrupt. Source is < 8.
5808 */
5809static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5810{
5811 const struct err_reg_info *eri = &various_err[source];
5812
5813 /*
5814 * TCritInt cannot go through interrupt_clear_down()
5815 * because it is not a second tier interrupt. The handler
5816 * should be called directly.
5817 */
5818 if (source == TCRIT_INT_SOURCE)
5819 handle_temp_err(dd);
5820 else if (eri->handler)
5821 interrupt_clear_down(dd, 0, eri);
5822 else
5823 dd_dev_info(dd,
5824 "%s: Unimplemented/reserved interrupt %d\n",
5825 __func__, source);
5826}
5827
5828static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5829{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005830 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005831 struct hfi1_pportdata *ppd = dd->pport;
5832 unsigned long flags;
5833 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5834
5835 if (reg & QSFP_HFI0_MODPRST_N) {
5836
5837 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5838 __func__);
5839
5840 if (!qsfp_mod_present(ppd)) {
5841 ppd->driver_link_ready = 0;
5842 /*
5843 * Cable removed, reset all our information about the
5844 * cache and cable capabilities
5845 */
5846
5847 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5848 /*
5849 * We don't set cache_refresh_required here as we expect
5850 * an interrupt when a cable is inserted
5851 */
5852 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005853 ppd->qsfp_info.reset_needed = 0;
5854 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005855 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5856 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005857 /* Invert the ModPresent pin now to detect plug-in */
5858 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5859 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08005860
5861 if ((ppd->offline_disabled_reason >
5862 HFI1_ODR_MASK(
5863 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
5864 (ppd->offline_disabled_reason ==
5865 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5866 ppd->offline_disabled_reason =
5867 HFI1_ODR_MASK(
5868 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);
5869
Mike Marciniszyn77241052015-07-30 15:17:43 -04005870 if (ppd->host_link_state == HLS_DN_POLL) {
5871 /*
5872 * The link is still in POLL. This means
5873 * that the normal link down processing
5874 * will not happen. We have to do it here
5875 * before turning the DC off.
5876 */
5877 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5878 }
5879 } else {
5880 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5881 ppd->qsfp_info.cache_valid = 0;
5882 ppd->qsfp_info.cache_refresh_required = 1;
5883 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5884 flags);
5885
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005886 /*
5887 * Stop inversion of ModPresent pin to detect
5888 * removal of the cable
5889 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005890 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005891 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5892 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5893
5894 ppd->offline_disabled_reason =
5895 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005896 }
5897 }
5898
5899 if (reg & QSFP_HFI0_INT_N) {
5900
5901 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5902 __func__);
5903 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5904 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005905 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5906 }
5907
5908 /* Schedule the QSFP work only if there is a cable attached. */
5909 if (qsfp_mod_present(ppd))
5910 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5911}
5912
5913static int request_host_lcb_access(struct hfi1_devdata *dd)
5914{
5915 int ret;
5916
5917 ret = do_8051_command(dd, HCMD_MISC,
5918 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5919 NULL);
5920 if (ret != HCMD_SUCCESS) {
5921 dd_dev_err(dd, "%s: command failed with error %d\n",
5922 __func__, ret);
5923 }
5924 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5925}
5926
5927static int request_8051_lcb_access(struct hfi1_devdata *dd)
5928{
5929 int ret;
5930
5931 ret = do_8051_command(dd, HCMD_MISC,
5932 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
5933 NULL);
5934 if (ret != HCMD_SUCCESS) {
5935 dd_dev_err(dd, "%s: command failed with error %d\n",
5936 __func__, ret);
5937 }
5938 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
5939}
5940
5941/*
5942 * Set the LCB selector - allow host access. The DCC selector always
5943 * points to the host.
5944 */
5945static inline void set_host_lcb_access(struct hfi1_devdata *dd)
5946{
5947 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5948 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
5949 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
5950}
5951
5952/*
5953 * Clear the LCB selector - allow 8051 access. The DCC selector always
5954 * points to the host.
5955 */
5956static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
5957{
5958 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
5959 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
5960}
5961
5962/*
5963 * Acquire LCB access from the 8051. If the host already has access,
5964 * just increment a counter. Otherwise, inform the 8051 that the
5965 * host is taking access.
5966 *
5967 * Returns:
5968 * 0 on success
5969 * -EBUSY if the 8051 has control and cannot be disturbed
5970 * -errno if unable to acquire access from the 8051
5971 */
5972int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
5973{
5974 struct hfi1_pportdata *ppd = dd->pport;
5975 int ret = 0;
5976
5977 /*
5978 * Use the host link state lock so the operation of this routine
5979 * { link state check, selector change, count increment } can occur
5980 * as a unit against a link state change. Otherwise there is a
5981 * race between the state change and the count increment.
5982 */
5983 if (sleep_ok) {
5984 mutex_lock(&ppd->hls_lock);
5985 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03005986 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005987 udelay(1);
5988 }
5989
5990 /* this access is valid only when the link is up */
5991 if ((ppd->host_link_state & HLS_UP) == 0) {
5992 dd_dev_info(dd, "%s: link state %s not up\n",
5993 __func__, link_state_name(ppd->host_link_state));
5994 ret = -EBUSY;
5995 goto done;
5996 }
5997
5998 if (dd->lcb_access_count == 0) {
5999 ret = request_host_lcb_access(dd);
6000 if (ret) {
6001 dd_dev_err(dd,
6002 "%s: unable to acquire LCB access, err %d\n",
6003 __func__, ret);
6004 goto done;
6005 }
6006 set_host_lcb_access(dd);
6007 }
6008 dd->lcb_access_count++;
6009done:
6010 mutex_unlock(&ppd->hls_lock);
6011 return ret;
6012}
6013
6014/*
6015 * Release LCB access by decrementing the use count. If the count is moving
6016 * from 1 to 0, inform 8051 that it has control back.
6017 *
6018 * Returns:
6019 * 0 on success
6020 * -errno if unable to release access to the 8051
6021 */
6022int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6023{
6024 int ret = 0;
6025
6026 /*
6027 * Use the host link state lock because the acquire needed it.
6028 * Here, we only need to keep { selector change, count decrement }
6029 * as a unit.
6030 */
6031 if (sleep_ok) {
6032 mutex_lock(&dd->pport->hls_lock);
6033 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006034 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006035 udelay(1);
6036 }
6037
6038 if (dd->lcb_access_count == 0) {
6039 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6040 __func__);
6041 goto done;
6042 }
6043
6044 if (dd->lcb_access_count == 1) {
6045 set_8051_lcb_access(dd);
6046 ret = request_8051_lcb_access(dd);
6047 if (ret) {
6048 dd_dev_err(dd,
6049 "%s: unable to release LCB access, err %d\n",
6050 __func__, ret);
6051 /* restore host access if the grant didn't work */
6052 set_host_lcb_access(dd);
6053 goto done;
6054 }
6055 }
6056 dd->lcb_access_count--;
6057done:
6058 mutex_unlock(&dd->pport->hls_lock);
6059 return ret;
6060}
6061
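/*
 * Usage sketch for the acquire/release pair above (illustrative only;
 * the CSR named here is just an assumed example of an LCB register a
 * process-context caller might want to read):
 *
 *	u64 reg;
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * sleep_ok = 1 allows blocking on the host link state mutex; callers
 * that cannot sleep pass 0 and the routine spins on mutex_trylock().
 */
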
6062/*
6063 * Initialize LCB access variables and state. Called during driver load,
6064 * after most of the initialization is finished.
6065 *
6066 * The DC default is LCB access on for the host. The driver defaults to
6067 * leaving access to the 8051. Assign access now - this constrains the call
6068 * to this routine to be after all LCB set-up is done. In particular, after
6069 * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6070 */
6071static void init_lcb_access(struct hfi1_devdata *dd)
6072{
6073 dd->lcb_access_count = 0;
6074}
6075
6076/*
6077 * Write a response back to a 8051 request.
6078 */
6079static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6080{
6081 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6082 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6083 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6084 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6085}
6086
6087/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006088 * Handle host requests from the 8051.
6089 *
6090 * This is a work-queue function outside of the interrupt.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006091 */
Easwar Hariharancbac3862016-02-03 14:31:31 -08006092void handle_8051_request(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006093{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006094 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6095 dc_host_req_work);
6096 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006097 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006098 u16 data = 0;
6099 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6100 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
Mike Marciniszyn77241052015-07-30 15:17:43 -04006101
6102 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6103 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6104 return; /* no request */
6105
6106 /* zero out COMPLETED so the response is seen */
6107 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6108
6109 /* extract request details */
6110 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6111 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6112 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6113 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6114
6115 switch (type) {
6116 case HREQ_LOAD_CONFIG:
6117 case HREQ_SAVE_CONFIG:
6118 case HREQ_READ_CONFIG:
6119 case HREQ_SET_TX_EQ_ABS:
6120 case HREQ_SET_TX_EQ_REL:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006121 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6122 type);
6123 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6124 break;
6125
Easwar Hariharancbac3862016-02-03 14:31:31 -08006126 case HREQ_ENABLE:
6127 lanes = data & 0xF;
6128 for (i = 0; lanes; lanes >>= 1, i++) {
6129 if (!(lanes & 1))
6130 continue;
6131 if (data & 0x200) {
6132 /* enable TX CDR */
6133 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6134 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6135 cdr_ctrl_byte |= (1 << (i + 4));
6136 } else {
6137 /* disable TX CDR */
6138 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6139 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6140 cdr_ctrl_byte &= ~(1 << (i + 4));
6141 }
6142
6143 if (data & 0x800) {
6144 /* enable RX CDR */
6145 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6146 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6147 cdr_ctrl_byte |= (1 << i);
6148 } else {
6149 /* disable RX CDR */
6150 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6151 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6152 cdr_ctrl_byte &= ~(1 << i);
6153 }
6154 }
6155 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6156 &cdr_ctrl_byte, 1);
6157 hreq_response(dd, HREQ_SUCCESS, data);
6158 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6159 break;
6160
Mike Marciniszyn77241052015-07-30 15:17:43 -04006161 case HREQ_CONFIG_DONE:
6162 hreq_response(dd, HREQ_SUCCESS, 0);
6163 break;
6164
6165 case HREQ_INTERFACE_TEST:
6166 hreq_response(dd, HREQ_SUCCESS, data);
6167 break;
6168
6169 default:
6170 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6171 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6172 break;
6173 }
6174}
6175
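/*
 * Worked example for the HREQ_ENABLE handling above (illustrative
 * request value; it assumes the cached QSFP data advertises both TX
 * and RX CDR capability): data = 0xA05 selects lanes 0 and 2 in the
 * low nibble and has both 0x200 (TX CDR on) and 0x800 (RX CDR on)
 * set, so cdr_ctrl_byte gains bits 4 and 6 for TX and bits 0 and 2
 * for RX before being written back at QSFP_CDR_CTRL_BYTE_OFFS.
 */
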
6176static void write_global_credit(struct hfi1_devdata *dd,
6177 u8 vau, u16 total, u16 shared)
6178{
6179 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6180 ((u64)total
6181 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6182 | ((u64)shared
6183 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6184 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6185}
6186
6187/*
6188 * Set up initial VL15 credits of the remote. Assumes the rest of
 6189 * the CM credit registers are zero from a previous global or credit reset.
6190 */
6191void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6192{
6193 /* leave shared count at zero for both global and VL15 */
6194 write_global_credit(dd, vau, vl15buf, 0);
6195
6196 /* We may need some credits for another VL when sending packets
6197 * with the snoop interface. Dividing it down the middle for VL15
6198 * and VL0 should suffice.
6199 */
6200 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6201 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6202 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6203 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6204 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6205 } else {
6206 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6207 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6208 }
6209}
6210
6211/*
6212 * Zero all credit details from the previous connection and
6213 * reset the CM manager's internal counters.
6214 */
6215void reset_link_credits(struct hfi1_devdata *dd)
6216{
6217 int i;
6218
6219 /* remove all previous VL credit limits */
6220 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6221 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6222 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6223 write_global_credit(dd, 0, 0, 0);
6224 /* reset the CM block */
6225 pio_send_control(dd, PSC_CM_RESET);
6226}
6227
6228/* convert a vCU to a CU */
6229static u32 vcu_to_cu(u8 vcu)
6230{
6231 return 1 << vcu;
6232}
6233
6234/* convert a CU to a vCU */
6235static u8 cu_to_vcu(u32 cu)
6236{
6237 return ilog2(cu);
6238}
6239
6240/* convert a vAU to an AU */
6241static u32 vau_to_au(u8 vau)
6242{
6243 return 8 * (1 << vau);
6244}
6245
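/*
 * Worked conversions for the three helpers above (illustrative
 * values):
 *
 *	vau_to_au(2) = 8 * (1 << 2) = 32
 *	vcu_to_cu(3) = 1 << 3       = 8
 *	cu_to_vcu(8) = ilog2(8)     = 3   (the inverse of the line above)
 */
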
6246static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6247{
6248 ppd->sm_trap_qp = 0x0;
6249 ppd->sa_qp = 0x1;
6250}
6251
6252/*
6253 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6254 */
6255static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6256{
6257 u64 reg;
6258
6259 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6260 write_csr(dd, DC_LCB_CFG_RUN, 0);
6261 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6262 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6263 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6264 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6265 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6266 reg = read_csr(dd, DCC_CFG_RESET);
6267 write_csr(dd, DCC_CFG_RESET,
6268 reg
6269 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6270 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6271 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6272 if (!abort) {
6273 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6274 write_csr(dd, DCC_CFG_RESET, reg);
6275 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6276 }
6277}
6278
6279/*
6280 * This routine should be called after the link has been transitioned to
6281 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6282 * reset).
6283 *
6284 * The expectation is that the caller of this routine would have taken
6285 * care of properly transitioning the link into the correct state.
6286 */
6287static void dc_shutdown(struct hfi1_devdata *dd)
6288{
6289 unsigned long flags;
6290
6291 spin_lock_irqsave(&dd->dc8051_lock, flags);
6292 if (dd->dc_shutdown) {
6293 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6294 return;
6295 }
6296 dd->dc_shutdown = 1;
6297 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6298 /* Shutdown the LCB */
6299 lcb_shutdown(dd, 1);
6300	/* Going to OFFLINE would have caused the 8051 to put the
6301	 * SerDes into reset already. Just need to shut down the 8051
6302	 * itself. */
6303 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6304}
6305
6306/* Calling this after the DC has been brought out of reset should not
6307 * do any damage. */
6308static void dc_start(struct hfi1_devdata *dd)
6309{
6310 unsigned long flags;
6311 int ret;
6312
6313 spin_lock_irqsave(&dd->dc8051_lock, flags);
6314 if (!dd->dc_shutdown)
6315 goto done;
6316 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6317 /* Take the 8051 out of reset */
6318 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6319 /* Wait until 8051 is ready */
6320 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6321 if (ret) {
6322 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6323 __func__);
6324 }
6325 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6326 write_csr(dd, DCC_CFG_RESET, 0x10);
6327 /* lcb_shutdown() with abort=1 does not restore these */
6328 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6329 spin_lock_irqsave(&dd->dc8051_lock, flags);
6330 dd->dc_shutdown = 0;
6331done:
6332 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6333}
6334
6335/*
6336 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6337 */
6338static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6339{
6340 u64 rx_radr, tx_radr;
6341 u32 version;
6342
6343 if (dd->icode != ICODE_FPGA_EMULATION)
6344 return;
6345
6346 /*
6347 * These LCB defaults on emulator _s are good, nothing to do here:
6348 * LCB_CFG_TX_FIFOS_RADR
6349 * LCB_CFG_RX_FIFOS_RADR
6350 * LCB_CFG_LN_DCLK
6351 * LCB_CFG_IGNORE_LOST_RCLK
6352 */
6353 if (is_emulator_s(dd))
6354 return;
6355 /* else this is _p */
6356
6357 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006358 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006359 version = 0x2d; /* all B0 use 0x2d or higher settings */
6360
6361 if (version <= 0x12) {
6362 /* release 0x12 and below */
6363
6364 /*
6365 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6366 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6367 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6368 */
6369 rx_radr =
6370 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6371 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6372 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6373 /*
6374 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6375 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6376 */
6377 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6378 } else if (version <= 0x18) {
6379 /* release 0x13 up to 0x18 */
6380 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6381 rx_radr =
6382 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6383 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6384 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6385 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6386 } else if (version == 0x19) {
6387 /* release 0x19 */
6388 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6389 rx_radr =
6390 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6391 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6392 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6393 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6394 } else if (version == 0x1a) {
6395 /* release 0x1a */
6396 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6397 rx_radr =
6398 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6399 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6400 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6401 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6402 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6403 } else {
6404 /* release 0x1b and higher */
6405 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6406 rx_radr =
6407 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6408 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6409 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6410 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6411 }
6412
6413 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6414 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6415 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6416 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6417 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6418}
6419
6420/*
6421 * Handle a SMA idle message
6422 *
6423 * This is a work-queue function outside of the interrupt.
6424 */
6425void handle_sma_message(struct work_struct *work)
6426{
6427 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6428 sma_message_work);
6429 struct hfi1_devdata *dd = ppd->dd;
6430 u64 msg;
6431 int ret;
6432
6433	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6434	 * is stripped off */
6435 ret = read_idle_sma(dd, &msg);
6436 if (ret)
6437 return;
6438 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6439 /*
6440 * React to the SMA message. Byte[1] (0 for us) is the command.
6441 */
6442 switch (msg & 0xff) {
6443 case SMA_IDLE_ARM:
6444 /*
6445 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6446 * State Transitions
6447 *
6448 * Only expected in INIT or ARMED, discard otherwise.
6449 */
6450 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6451 ppd->neighbor_normal = 1;
6452 break;
6453 case SMA_IDLE_ACTIVE:
6454 /*
6455 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6456 * State Transitions
6457 *
6458 * Can activate the node. Discard otherwise.
6459 */
6460 if (ppd->host_link_state == HLS_UP_ARMED
6461 && ppd->is_active_optimize_enabled) {
6462 ppd->neighbor_normal = 1;
6463 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6464 if (ret)
6465 dd_dev_err(
6466 dd,
6467 "%s: received Active SMA idle message, couldn't set link to Active\n",
6468 __func__);
6469 }
6470 break;
6471 default:
6472 dd_dev_err(dd,
6473 "%s: received unexpected SMA idle message 0x%llx\n",
6474 __func__, msg);
6475 break;
6476 }
6477}
6478
6479static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6480{
6481 u64 rcvctrl;
6482 unsigned long flags;
6483
6484 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6485 rcvctrl = read_csr(dd, RCV_CTRL);
6486 rcvctrl |= add;
6487 rcvctrl &= ~clear;
6488 write_csr(dd, RCV_CTRL, rcvctrl);
6489 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6490}
6491
6492static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6493{
6494 adjust_rcvctrl(dd, add, 0);
6495}
6496
6497static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6498{
6499 adjust_rcvctrl(dd, 0, clear);
6500}
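
/*
 * add_rcvctrl()/clear_rcvctrl() give callers an atomic read-modify-write of
 * RCV_CTRL under rcvctrl_lock. For example, rxe_freeze() below disables the
 * port with clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) and
 * rxe_kernel_unfreeze() re-enables it by adding the same bit back.
 */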
6501
6502/*
6503 * Called from all interrupt handlers to start handling an SPC freeze.
6504 */
6505void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6506{
6507 struct hfi1_devdata *dd = ppd->dd;
6508 struct send_context *sc;
6509 int i;
6510
6511 if (flags & FREEZE_SELF)
6512 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6513
6514 /* enter frozen mode */
6515 dd->flags |= HFI1_FROZEN;
6516
6517 /* notify all SDMA engines that they are going into a freeze */
6518 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6519
6520 /* do halt pre-handling on all enabled send contexts */
6521 for (i = 0; i < dd->num_send_contexts; i++) {
6522 sc = dd->send_contexts[i].sc;
6523 if (sc && (sc->flags & SCF_ENABLED))
6524 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6525 }
6526
6527	/* Send contexts are frozen. Notify user space */
6528 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6529
6530 if (flags & FREEZE_ABORT) {
6531 dd_dev_err(dd,
6532 "Aborted freeze recovery. Please REBOOT system\n");
6533 return;
6534 }
6535 /* queue non-interrupt handler */
6536 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6537}
6538
6539/*
6540 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6541 * depending on the "freeze" parameter.
6542 *
6543 * No need to return an error if it times out, our only option
6544 * is to proceed anyway.
6545 */
6546static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6547{
6548 unsigned long timeout;
6549 u64 reg;
6550
6551 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6552 while (1) {
6553 reg = read_csr(dd, CCE_STATUS);
6554 if (freeze) {
6555 /* waiting until all indicators are set */
6556 if ((reg & ALL_FROZE) == ALL_FROZE)
6557 return; /* all done */
6558 } else {
6559 /* waiting until all indicators are clear */
6560 if ((reg & ALL_FROZE) == 0)
6561 return; /* all done */
6562 }
6563
6564 if (time_after(jiffies, timeout)) {
6565 dd_dev_err(dd,
6566 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6567 freeze ? "" : "un",
6568 reg & ALL_FROZE,
6569 freeze ? ALL_FROZE : 0ull);
6570 return;
6571 }
6572 usleep_range(80, 120);
6573 }
6574}
6575
6576/*
6577 * Do all freeze handling for the RXE block.
6578 */
6579static void rxe_freeze(struct hfi1_devdata *dd)
6580{
6581 int i;
6582
6583 /* disable port */
6584 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6585
6586 /* disable all receive contexts */
6587 for (i = 0; i < dd->num_rcv_contexts; i++)
6588 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6589}
6590
6591/*
6592 * Unfreeze handling for the RXE block - kernel contexts only.
6593 * This will also enable the port. User contexts will do unfreeze
6594 * handling on a per-context basis as they call into the driver.
6595 *
6596 */
6597static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6598{
6599 int i;
6600
6601 /* enable all kernel contexts */
6602 for (i = 0; i < dd->n_krcv_queues; i++)
6603 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
6604
6605 /* enable port */
6606 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6607}
6608
6609/*
6610 * Non-interrupt SPC freeze handling.
6611 *
6612 * This is a work-queue function outside of the triggering interrupt.
6613 */
6614void handle_freeze(struct work_struct *work)
6615{
6616 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6617 freeze_work);
6618 struct hfi1_devdata *dd = ppd->dd;
6619
6620 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006621 wait_for_freeze_status(dd, 1);
6622
6623 /* SPC is now frozen */
6624
6625 /* do send PIO freeze steps */
6626 pio_freeze(dd);
6627
6628 /* do send DMA freeze steps */
6629 sdma_freeze(dd);
6630
6631 /* do send egress freeze steps - nothing to do */
6632
6633 /* do receive freeze steps */
6634 rxe_freeze(dd);
6635
6636 /*
6637 * Unfreeze the hardware - clear the freeze, wait for each
6638 * block's frozen bit to clear, then clear the frozen flag.
6639 */
6640 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6641 wait_for_freeze_status(dd, 0);
6642
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006643 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006644 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6645 wait_for_freeze_status(dd, 1);
6646 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6647 wait_for_freeze_status(dd, 0);
6648 }
6649
6650 /* do send PIO unfreeze steps for kernel contexts */
6651 pio_kernel_unfreeze(dd);
6652
6653 /* do send DMA unfreeze steps */
6654 sdma_unfreeze(dd);
6655
6656 /* do send egress unfreeze steps - nothing to do */
6657
6658 /* do receive unfreeze steps for kernel contexts */
6659 rxe_kernel_unfreeze(dd);
6660
6661 /*
6662 * The unfreeze procedure touches global device registers when
6663 * it disables and re-enables RXE. Mark the device unfrozen
6664 * after all that is done so other parts of the driver waiting
6665 * for the device to unfreeze don't do things out of order.
6666 *
6667 * The above implies that the meaning of HFI1_FROZEN flag is
6668 * "Device has gone into freeze mode and freeze mode handling
6669 * is still in progress."
6670 *
6671 * The flag will be removed when freeze mode processing has
6672 * completed.
6673 */
6674 dd->flags &= ~HFI1_FROZEN;
6675 wake_up(&dd->event_queue);
6676
6677 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006678}
6679
6680/*
6681 * Handle a link up interrupt from the 8051.
6682 *
6683 * This is a work-queue function outside of the interrupt.
6684 */
6685void handle_link_up(struct work_struct *work)
6686{
6687 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6688 link_up_work);
6689 set_link_state(ppd, HLS_UP_INIT);
6690
6691 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6692 read_ltp_rtt(ppd->dd);
6693 /*
6694 * OPA specifies that certain counters are cleared on a transition
6695 * to link up, so do that.
6696 */
6697 clear_linkup_counters(ppd->dd);
6698 /*
6699 * And (re)set link up default values.
6700 */
6701 set_linkup_defaults(ppd);
6702
6703 /* enforce link speed enabled */
6704 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6705 /* oops - current speed is not enabled, bounce */
6706 dd_dev_err(ppd->dd,
6707 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6708 ppd->link_speed_active, ppd->link_speed_enabled);
6709 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6710 OPA_LINKDOWN_REASON_SPEED_POLICY);
6711 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006712 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006713 start_link(ppd);
6714 }
6715}
6716
6717/* Several pieces of LNI information were cached for SMA in ppd.
6718 * Reset these on link down */
6719static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6720{
6721 ppd->neighbor_guid = 0;
6722 ppd->neighbor_port_number = 0;
6723 ppd->neighbor_type = 0;
6724 ppd->neighbor_fm_security = 0;
6725}
6726
6727/*
6728 * Handle a link down interrupt from the 8051.
6729 *
6730 * This is a work-queue function outside of the interrupt.
6731 */
6732void handle_link_down(struct work_struct *work)
6733{
6734 u8 lcl_reason, neigh_reason = 0;
6735 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6736 link_down_work);
6737
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006738 if ((ppd->host_link_state &
6739 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6740 ppd->port_type == PORT_TYPE_FIXED)
6741 ppd->offline_disabled_reason =
6742 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6743
6744 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006745 set_link_state(ppd, HLS_DN_OFFLINE);
6746
6747 lcl_reason = 0;
6748 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6749
6750 /*
6751 * If no reason, assume peer-initiated but missed
6752 * LinkGoingDown idle flits.
6753 */
6754 if (neigh_reason == 0)
6755 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6756
6757 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6758
6759 reset_neighbor_info(ppd);
6760
6761 /* disable the port */
6762 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6763
6764 /* If there is no cable attached, turn the DC off. Otherwise,
6765	 * start the link bring-up. */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006766 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006767 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006768 } else {
6769 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006770 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006771 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006772}
6773
6774void handle_link_bounce(struct work_struct *work)
6775{
6776 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6777 link_bounce_work);
6778
6779 /*
6780 * Only do something if the link is currently up.
6781 */
6782 if (ppd->host_link_state & HLS_UP) {
6783 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006784 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006785 start_link(ppd);
6786 } else {
6787 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6788 __func__, link_state_name(ppd->host_link_state));
6789 }
6790}
6791
6792/*
6793 * Mask conversion: Capability exchange to Port LTP. The capability
6794 * exchange has an implicit 16b CRC that is mandatory.
6795 */
6796static int cap_to_port_ltp(int cap)
6797{
6798 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6799
6800 if (cap & CAP_CRC_14B)
6801 port_ltp |= PORT_LTP_CRC_MODE_14;
6802 if (cap & CAP_CRC_48B)
6803 port_ltp |= PORT_LTP_CRC_MODE_48;
6804 if (cap & CAP_CRC_12B_16B_PER_LANE)
6805 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6806
6807 return port_ltp;
6808}
6809
6810/*
6811 * Convert an OPA Port LTP mask to capability mask
6812 */
6813int port_ltp_to_cap(int port_ltp)
6814{
6815 int cap_mask = 0;
6816
6817 if (port_ltp & PORT_LTP_CRC_MODE_14)
6818 cap_mask |= CAP_CRC_14B;
6819 if (port_ltp & PORT_LTP_CRC_MODE_48)
6820 cap_mask |= CAP_CRC_48B;
6821 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6822 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6823
6824 return cap_mask;
6825}
6826
6827/*
6828 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6829 */
6830static int lcb_to_port_ltp(int lcb_crc)
6831{
6832 int port_ltp = 0;
6833
6834 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6835 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6836 else if (lcb_crc == LCB_CRC_48B)
6837 port_ltp = PORT_LTP_CRC_MODE_48;
6838 else if (lcb_crc == LCB_CRC_14B)
6839 port_ltp = PORT_LTP_CRC_MODE_14;
6840 else
6841 port_ltp = PORT_LTP_CRC_MODE_16;
6842
6843 return port_ltp;
6844}
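
/*
 * Example of the mask conversions above: a capability mask of
 * CAP_CRC_14B | CAP_CRC_48B maps to PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48 (the 16b mode is always
 * present), and port_ltp_to_cap() strips the mandatory 16b mode back out.
 */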
6845
6846/*
6847 * Our neighbor has indicated that we are allowed to act as a fabric
6848 * manager, so place the full management partition key in the second
6849 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6850 * that we should already have the limited management partition key in
6851 * array element 1, and also that the port is not yet up when
6852 * add_full_mgmt_pkey() is invoked.
6853 */
6854static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6855{
6856 struct hfi1_devdata *dd = ppd->dd;
6857
Dean Luick87645222015-12-01 15:38:21 -05006858	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6859 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6860 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6861 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006862 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6863 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6864}
6865
6866/*
6867 * Convert the given link width to the OPA link width bitmask.
6868 */
6869static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6870{
6871 switch (width) {
6872 case 0:
6873 /*
6874 * Simulator and quick linkup do not set the width.
6875 * Just set it to 4x without complaint.
6876 */
6877 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6878 return OPA_LINK_WIDTH_4X;
6879 return 0; /* no lanes up */
6880 case 1: return OPA_LINK_WIDTH_1X;
6881 case 2: return OPA_LINK_WIDTH_2X;
6882 case 3: return OPA_LINK_WIDTH_3X;
6883 default:
6884 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6885 __func__, width);
6886 /* fall through */
6887 case 4: return OPA_LINK_WIDTH_4X;
6888 }
6889}
6890
6891/*
6892 * Do a population count on the bottom nibble.
6893 */
6894static const u8 bit_counts[16] = {
6895 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6896};
6897static inline u8 nibble_to_count(u8 nibble)
6898{
6899 return bit_counts[nibble & 0xf];
6900}
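
/*
 * For example, an enable_lane nibble of 0xb (binary 1011, lanes 0, 1 and 3)
 * counts as 3 active lanes, which link_width_to_bits() then reports as
 * OPA_LINK_WIDTH_3X.
 */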
6901
6902/*
6903 * Read the active lane information from the 8051 registers and return
6904 * their widths.
6905 *
6906 * Active lane information is found in these 8051 registers:
6907 * enable_lane_tx
6908 * enable_lane_rx
6909 */
6910static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
6911 u16 *rx_width)
6912{
6913 u16 tx, rx;
6914 u8 enable_lane_rx;
6915 u8 enable_lane_tx;
6916 u8 tx_polarity_inversion;
6917 u8 rx_polarity_inversion;
6918 u8 max_rate;
6919
6920 /* read the active lanes */
6921 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
6922 &rx_polarity_inversion, &max_rate);
6923 read_local_lni(dd, &enable_lane_rx);
6924
6925 /* convert to counts */
6926 tx = nibble_to_count(enable_lane_tx);
6927 rx = nibble_to_count(enable_lane_rx);
6928
6929 /*
6930 * Set link_speed_active here, overriding what was set in
6931 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
6932 * set the max_rate field in handle_verify_cap until v0.19.
6933 */
6934 if ((dd->icode == ICODE_RTL_SILICON)
6935 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
6936 /* max_rate: 0 = 12.5G, 1 = 25G */
6937 switch (max_rate) {
6938 case 0:
6939 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
6940 break;
6941 default:
6942 dd_dev_err(dd,
6943 "%s: unexpected max rate %d, using 25Gb\n",
6944 __func__, (int)max_rate);
6945 /* fall through */
6946 case 1:
6947 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
6948 break;
6949 }
6950 }
6951
6952 dd_dev_info(dd,
6953 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
6954 enable_lane_tx, tx, enable_lane_rx, rx);
6955 *tx_width = link_width_to_bits(dd, tx);
6956 *rx_width = link_width_to_bits(dd, rx);
6957}
6958
6959/*
6960 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
6961 * Valid after the end of VerifyCap and during LinkUp. Does not change
6962 * after link up. I.e. look elsewhere for downgrade information.
6963 *
6964 * Bits are:
6965 * + bits [7:4] contain the number of active transmitters
6966 * + bits [3:0] contain the number of active receivers
6967 * These are numbers 1 through 4 and can be different values if the
6968 * link is asymmetric.
6969 *
6970 * verify_cap_local_fm_link_width[0] retains its original value.
6971 */
6972static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
6973 u16 *rx_width)
6974{
6975 u16 widths, tx, rx;
6976 u8 misc_bits, local_flags;
6977 u16 active_tx, active_rx;
6978
6979 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
6980 tx = widths >> 12;
6981 rx = (widths >> 8) & 0xf;
6982
6983 *tx_width = link_width_to_bits(dd, tx);
6984 *rx_width = link_width_to_bits(dd, rx);
6985
6986 /* print the active widths */
6987 get_link_widths(dd, &active_tx, &active_rx);
6988}
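
/*
 * For example, a widths value of 0x4300 decodes here as tx = 4 and rx = 3
 * (the upper byte holds verify_cap_local_fm_link_width[1]), yielding
 * OPA_LINK_WIDTH_4X on transmit and OPA_LINK_WIDTH_3X on receive.
 */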
6989
6990/*
6991 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
6992 * hardware information when the link first comes up.
6993 *
6994 * The link width is not available until after VerifyCap.AllFramesReceived
6995 * (the trigger for handle_verify_cap), so this is outside that routine
6996 * and should be called when the 8051 signals linkup.
6997 */
6998void get_linkup_link_widths(struct hfi1_pportdata *ppd)
6999{
7000 u16 tx_width, rx_width;
7001
7002 /* get end-of-LNI link widths */
7003 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7004
7005 /* use tx_width as the link is supposed to be symmetric on link up */
7006 ppd->link_width_active = tx_width;
7007 /* link width downgrade active (LWD.A) starts out matching LW.A */
7008 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7009 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7010 /* per OPA spec, on link up LWD.E resets to LWD.S */
7011 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7012	/* cache the active egress rate (units [10^6 bits/sec]) */
7013 ppd->current_egress_rate = active_egress_rate(ppd);
7014}
7015
7016/*
7017 * Handle a verify capabilities interrupt from the 8051.
7018 *
7019 * This is a work-queue function outside of the interrupt.
7020 */
7021void handle_verify_cap(struct work_struct *work)
7022{
7023 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7024 link_vc_work);
7025 struct hfi1_devdata *dd = ppd->dd;
7026 u64 reg;
7027 u8 power_management;
7028	u8 continuous;
7029 u8 vcu;
7030 u8 vau;
7031 u8 z;
7032 u16 vl15buf;
7033 u16 link_widths;
7034 u16 crc_mask;
7035 u16 crc_val;
7036 u16 device_id;
7037 u16 active_tx, active_rx;
7038 u8 partner_supported_crc;
7039 u8 remote_tx_rate;
7040 u8 device_rev;
7041
7042 set_link_state(ppd, HLS_VERIFY_CAP);
7043
7044 lcb_shutdown(dd, 0);
7045 adjust_lcb_for_fpga_serdes(dd);
7046
7047 /*
7048 * These are now valid:
7049 * remote VerifyCap fields in the general LNI config
7050 * CSR DC8051_STS_REMOTE_GUID
7051 * CSR DC8051_STS_REMOTE_NODE_TYPE
7052 * CSR DC8051_STS_REMOTE_FM_SECURITY
7053 * CSR DC8051_STS_REMOTE_PORT_NO
7054 */
7055
7056	read_vc_remote_phy(dd, &power_management, &continuous);
7057 read_vc_remote_fabric(
7058 dd,
7059 &vau,
7060 &z,
7061 &vcu,
7062 &vl15buf,
7063 &partner_supported_crc);
7064 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7065 read_remote_device_id(dd, &device_id, &device_rev);
7066 /*
7067	 * The 'MgmtAllowed' information, which is exchanged during
7068	 * LNI, is also available at this point.
7069 */
7070 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7071 /* print the active widths */
7072 get_link_widths(dd, &active_tx, &active_rx);
7073 dd_dev_info(dd,
7074 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7075		    (int)power_management, (int)continuous);
7076 dd_dev_info(dd,
7077 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7078 (int)vau,
7079 (int)z,
7080 (int)vcu,
7081 (int)vl15buf,
7082 (int)partner_supported_crc);
7083 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7084 (u32)remote_tx_rate, (u32)link_widths);
7085 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7086 (u32)device_id, (u32)device_rev);
7087 /*
7088 * The peer vAU value just read is the peer receiver value. HFI does
7089 * not support a transmit vAU of 0 (AU == 8). We advertised that
7090 * with Z=1 in the fabric capabilities sent to the peer. The peer
7091 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7092 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7093 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7094 * subject to the Z value exception.
7095 */
7096 if (vau == 0)
7097 vau = 1;
7098 set_up_vl15(dd, vau, vl15buf);
7099
7100 /* set up the LCB CRC mode */
7101 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7102
7103 /* order is important: use the lowest bit in common */
7104 if (crc_mask & CAP_CRC_14B)
7105 crc_val = LCB_CRC_14B;
7106 else if (crc_mask & CAP_CRC_48B)
7107 crc_val = LCB_CRC_48B;
7108 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7109 crc_val = LCB_CRC_12B_16B_PER_LANE;
7110 else
7111 crc_val = LCB_CRC_16B;
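
	/*
	 * For example, if we enabled only 14b (plus the implicit 16b) and the
	 * peer advertised only 48b, crc_mask ends up empty and the link falls
	 * back to the mandatory LCB_CRC_16B mode.
	 */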
7112
7113 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7114 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7115 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7116
7117 /* set (14b only) or clear sideband credit */
7118 reg = read_csr(dd, SEND_CM_CTRL);
7119 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7120 write_csr(dd, SEND_CM_CTRL,
7121 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7122 } else {
7123 write_csr(dd, SEND_CM_CTRL,
7124 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7125 }
7126
7127 ppd->link_speed_active = 0; /* invalid value */
7128 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7129 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7130 switch (remote_tx_rate) {
7131 case 0:
7132 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7133 break;
7134 case 1:
7135 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7136 break;
7137 }
7138 } else {
7139 /* actual rate is highest bit of the ANDed rates */
7140 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7141
7142 if (rate & 2)
7143 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7144 else if (rate & 1)
7145 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7146 }
7147 if (ppd->link_speed_active == 0) {
7148 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7149 __func__, (int)remote_tx_rate);
7150 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7151 }
7152
7153 /*
7154 * Cache the values of the supported, enabled, and active
7155 * LTP CRC modes to return in 'portinfo' queries. But the bit
7156 * flags that are returned in the portinfo query differ from
7157 * what's in the link_crc_mask, crc_sizes, and crc_val
7158 * variables. Convert these here.
7159 */
7160 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7161 /* supported crc modes */
7162 ppd->port_ltp_crc_mode |=
7163 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7164 /* enabled crc modes */
7165 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7166 /* active crc mode */
7167
7168 /* set up the remote credit return table */
7169 assign_remote_cm_au_table(dd, vcu);
7170
7171 /*
7172 * The LCB is reset on entry to handle_verify_cap(), so this must
7173 * be applied on every link up.
7174 *
7175 * Adjust LCB error kill enable to kill the link if
7176 * these RBUF errors are seen:
7177 * REPLAY_BUF_MBE_SMASK
7178 * FLIT_INPUT_BUF_MBE_SMASK
7179 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007180 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007181 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7182 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7183 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7184 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7185 }
7186
7187 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7188 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7189
7190 /* give 8051 access to the LCB CSRs */
7191 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7192 set_8051_lcb_access(dd);
7193
7194 ppd->neighbor_guid =
7195 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7196 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7197 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7198 ppd->neighbor_type =
7199 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7200 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7201 ppd->neighbor_fm_security =
7202 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7203 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7204 dd_dev_info(dd,
7205 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7206 ppd->neighbor_guid, ppd->neighbor_type,
7207 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7208 if (ppd->mgmt_allowed)
7209 add_full_mgmt_pkey(ppd);
7210
7211 /* tell the 8051 to go to LinkUp */
7212 set_link_state(ppd, HLS_GOING_UP);
7213}
7214
7215/*
7216 * Apply the link width downgrade enabled policy against the current active
7217 * link widths.
7218 *
7219 * Called when the enabled policy changes or the active link widths change.
7220 */
7221void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7222{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007223 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007224 int tries;
7225 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007226 u16 tx, rx;
7227
Dean Luick323fd782015-11-16 21:59:24 -05007228 /* use the hls lock to avoid a race with actual link up */
7229 tries = 0;
7230retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007231 mutex_lock(&ppd->hls_lock);
7232 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007233 if (!(ppd->host_link_state & HLS_UP)) {
7234		/* still going up... wait and retry */
7235 if (ppd->host_link_state & HLS_GOING_UP) {
7236 if (++tries < 1000) {
7237 mutex_unlock(&ppd->hls_lock);
7238 usleep_range(100, 120); /* arbitrary */
7239 goto retry;
7240 }
7241 dd_dev_err(ppd->dd,
7242 "%s: giving up waiting for link state change\n",
7243 __func__);
7244 }
7245 goto done;
7246 }
7247
7248 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007249
7250 if (refresh_widths) {
7251 get_link_widths(ppd->dd, &tx, &rx);
7252 ppd->link_width_downgrade_tx_active = tx;
7253 ppd->link_width_downgrade_rx_active = rx;
7254 }
7255
7256 if (lwde == 0) {
7257 /* downgrade is disabled */
7258
7259 /* bounce if not at starting active width */
7260 if ((ppd->link_width_active !=
7261 ppd->link_width_downgrade_tx_active)
7262 || (ppd->link_width_active !=
7263 ppd->link_width_downgrade_rx_active)) {
7264 dd_dev_err(ppd->dd,
7265 "Link downgrade is disabled and link has downgraded, downing link\n");
7266 dd_dev_err(ppd->dd,
7267 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7268 ppd->link_width_active,
7269 ppd->link_width_downgrade_tx_active,
7270 ppd->link_width_downgrade_rx_active);
7271 do_bounce = 1;
7272 }
7273 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7274 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7275 /* Tx or Rx is outside the enabled policy */
7276 dd_dev_err(ppd->dd,
7277 "Link is outside of downgrade allowed, downing link\n");
7278 dd_dev_err(ppd->dd,
7279 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7280 lwde,
7281 ppd->link_width_downgrade_tx_active,
7282 ppd->link_width_downgrade_rx_active);
7283 do_bounce = 1;
7284 }
7285
Dean Luick323fd782015-11-16 21:59:24 -05007286done:
7287 mutex_unlock(&ppd->hls_lock);
7288
Mike Marciniszyn77241052015-07-30 15:17:43 -04007289 if (do_bounce) {
7290 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7291 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7292 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007293 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007294 start_link(ppd);
7295 }
7296}
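
/*
 * The retry loop above bounds the wait for a link still in HLS_GOING_UP to
 * roughly 1000 iterations of ~100us each, i.e. on the order of 100ms,
 * before giving up on applying the downgrade policy.
 */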
7297
7298/*
7299 * Handle a link downgrade interrupt from the 8051.
7300 *
7301 * This is a work-queue function outside of the interrupt.
7302 */
7303void handle_link_downgrade(struct work_struct *work)
7304{
7305 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7306 link_downgrade_work);
7307
7308 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7309 apply_link_downgrade_policy(ppd, 1);
7310}
7311
7312static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7313{
7314 return flag_string(buf, buf_len, flags, dcc_err_flags,
7315 ARRAY_SIZE(dcc_err_flags));
7316}
7317
7318static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7319{
7320 return flag_string(buf, buf_len, flags, lcb_err_flags,
7321 ARRAY_SIZE(lcb_err_flags));
7322}
7323
7324static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7325{
7326 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7327 ARRAY_SIZE(dc8051_err_flags));
7328}
7329
7330static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7331{
7332 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7333 ARRAY_SIZE(dc8051_info_err_flags));
7334}
7335
7336static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7337{
7338 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7339 ARRAY_SIZE(dc8051_info_host_msg_flags));
7340}
7341
7342static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7343{
7344 struct hfi1_pportdata *ppd = dd->pport;
7345 u64 info, err, host_msg;
7346 int queue_link_down = 0;
7347 char buf[96];
7348
7349 /* look at the flags */
7350 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7351 /* 8051 information set by firmware */
7352 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7353 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7354 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7355 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7356 host_msg = (info >>
7357 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7358 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7359
7360 /*
7361 * Handle error flags.
7362 */
7363 if (err & FAILED_LNI) {
7364 /*
7365 * LNI error indications are cleared by the 8051
7366 * only when starting polling. Only pay attention
7367 * to them when in the states that occur during
7368 * LNI.
7369 */
7370 if (ppd->host_link_state
7371 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7372 queue_link_down = 1;
7373 dd_dev_info(dd, "Link error: %s\n",
7374 dc8051_info_err_string(buf,
7375 sizeof(buf),
7376 err & FAILED_LNI));
7377 }
7378 err &= ~(u64)FAILED_LNI;
7379 }
Dean Luick6d014532015-12-01 15:38:23 -05007380		/* unknown frames can happen during LNI, just count */
7381 if (err & UNKNOWN_FRAME) {
7382 ppd->unknown_frame_count++;
7383 err &= ~(u64)UNKNOWN_FRAME;
7384 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007385 if (err) {
7386 /* report remaining errors, but do not do anything */
7387 dd_dev_err(dd, "8051 info error: %s\n",
7388 dc8051_info_err_string(buf, sizeof(buf), err));
7389 }
7390
7391 /*
7392 * Handle host message flags.
7393 */
7394 if (host_msg & HOST_REQ_DONE) {
7395 /*
7396 * Presently, the driver does a busy wait for
7397 * host requests to complete. This is only an
7398 * informational message.
7399 * NOTE: The 8051 clears the host message
7400 * information *on the next 8051 command*.
7401 * Therefore, when linkup is achieved,
7402 * this flag will still be set.
7403 */
7404 host_msg &= ~(u64)HOST_REQ_DONE;
7405 }
7406 if (host_msg & BC_SMA_MSG) {
7407 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7408 host_msg &= ~(u64)BC_SMA_MSG;
7409 }
7410 if (host_msg & LINKUP_ACHIEVED) {
7411 dd_dev_info(dd, "8051: Link up\n");
7412 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7413 host_msg &= ~(u64)LINKUP_ACHIEVED;
7414 }
7415 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007416 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007417 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7418 }
7419 if (host_msg & VERIFY_CAP_FRAME) {
7420 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7421 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7422 }
7423 if (host_msg & LINK_GOING_DOWN) {
7424 const char *extra = "";
7425 /* no downgrade action needed if going down */
7426 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7427 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7428 extra = " (ignoring downgrade)";
7429 }
7430 dd_dev_info(dd, "8051: Link down%s\n", extra);
7431 queue_link_down = 1;
7432 host_msg &= ~(u64)LINK_GOING_DOWN;
7433 }
7434 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7435 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7436 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7437 }
7438 if (host_msg) {
7439 /* report remaining messages, but do not do anything */
7440 dd_dev_info(dd, "8051 info host message: %s\n",
7441 dc8051_info_host_msg_string(buf, sizeof(buf),
7442 host_msg));
7443 }
7444
7445 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7446 }
7447 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7448 /*
7449 * Lost the 8051 heartbeat. If this happens, we
7450 * receive constant interrupts about it. Disable
7451 * the interrupt after the first.
7452 */
7453 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7454 write_csr(dd, DC_DC8051_ERR_EN,
7455 read_csr(dd, DC_DC8051_ERR_EN)
7456 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7457
7458 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7459 }
7460 if (reg) {
7461 /* report the error, but do not do anything */
7462 dd_dev_err(dd, "8051 error: %s\n",
7463 dc8051_err_string(buf, sizeof(buf), reg));
7464 }
7465
7466 if (queue_link_down) {
7467 /* if the link is already going down or disabled, do not
7468 * queue another */
7469 if ((ppd->host_link_state
7470 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7471 || ppd->link_enabled == 0) {
7472 dd_dev_info(dd, "%s: not queuing link down\n",
7473 __func__);
7474 } else {
7475 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7476 }
7477 }
7478}
7479
7480static const char * const fm_config_txt[] = {
7481[0] =
7482 "BadHeadDist: Distance violation between two head flits",
7483[1] =
7484 "BadTailDist: Distance violation between two tail flits",
7485[2] =
7486 "BadCtrlDist: Distance violation between two credit control flits",
7487[3] =
7488 "BadCrdAck: Credits return for unsupported VL",
7489[4] =
7490 "UnsupportedVLMarker: Received VL Marker",
7491[5] =
7492 "BadPreempt: Exceeded the preemption nesting level",
7493[6] =
7494 "BadControlFlit: Received unsupported control flit",
7495/* no 7 */
7496[8] =
7497 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7498};
7499
7500static const char * const port_rcv_txt[] = {
7501[1] =
7502 "BadPktLen: Illegal PktLen",
7503[2] =
7504 "PktLenTooLong: Packet longer than PktLen",
7505[3] =
7506 "PktLenTooShort: Packet shorter than PktLen",
7507[4] =
7508 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7509[5] =
7510 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7511[6] =
7512 "BadL2: Illegal L2 opcode",
7513[7] =
7514 "BadSC: Unsupported SC",
7515[9] =
7516 "BadRC: Illegal RC",
7517[11] =
7518 "PreemptError: Preempting with same VL",
7519[12] =
7520 "PreemptVL15: Preempting a VL15 packet",
7521};
7522
7523#define OPA_LDR_FMCONFIG_OFFSET 16
7524#define OPA_LDR_PORTRCV_OFFSET 0
7525static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7526{
7527 u64 info, hdr0, hdr1;
7528 const char *extra;
7529 char buf[96];
7530 struct hfi1_pportdata *ppd = dd->pport;
7531 u8 lcl_reason = 0;
7532 int do_bounce = 0;
7533
7534 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7535 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7536 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7537 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7538 /* set status bit */
7539 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7540 }
7541 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7542 }
7543
7544 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7545 struct hfi1_pportdata *ppd = dd->pport;
7546 /* this counter saturates at (2^32) - 1 */
7547 if (ppd->link_downed < (u32)UINT_MAX)
7548 ppd->link_downed++;
7549 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7550 }
7551
7552 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7553 u8 reason_valid = 1;
7554
7555 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7556 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7557 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7558 /* set status bit */
7559 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7560 }
7561 switch (info) {
7562 case 0:
7563 case 1:
7564 case 2:
7565 case 3:
7566 case 4:
7567 case 5:
7568 case 6:
7569 extra = fm_config_txt[info];
7570 break;
7571 case 8:
7572 extra = fm_config_txt[info];
7573 if (ppd->port_error_action &
7574 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7575 do_bounce = 1;
7576 /*
7577 * lcl_reason cannot be derived from info
7578 * for this error
7579 */
7580 lcl_reason =
7581 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7582 }
7583 break;
7584 default:
7585 reason_valid = 0;
7586 snprintf(buf, sizeof(buf), "reserved%lld", info);
7587 extra = buf;
7588 break;
7589 }
7590
7591 if (reason_valid && !do_bounce) {
7592 do_bounce = ppd->port_error_action &
7593 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7594 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7595 }
7596
7597 /* just report this */
7598 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7599 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7600 }
7601
7602 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7603 u8 reason_valid = 1;
7604
7605 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7606 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7607 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7608 if (!(dd->err_info_rcvport.status_and_code &
7609 OPA_EI_STATUS_SMASK)) {
7610 dd->err_info_rcvport.status_and_code =
7611 info & OPA_EI_CODE_SMASK;
7612 /* set status bit */
7613 dd->err_info_rcvport.status_and_code |=
7614 OPA_EI_STATUS_SMASK;
7615 /* save first 2 flits in the packet that caused
7616 * the error */
7617 dd->err_info_rcvport.packet_flit1 = hdr0;
7618 dd->err_info_rcvport.packet_flit2 = hdr1;
7619 }
7620 switch (info) {
7621 case 1:
7622 case 2:
7623 case 3:
7624 case 4:
7625 case 5:
7626 case 6:
7627 case 7:
7628 case 9:
7629 case 11:
7630 case 12:
7631 extra = port_rcv_txt[info];
7632 break;
7633 default:
7634 reason_valid = 0;
7635 snprintf(buf, sizeof(buf), "reserved%lld", info);
7636 extra = buf;
7637 break;
7638 }
7639
7640 if (reason_valid && !do_bounce) {
7641 do_bounce = ppd->port_error_action &
7642 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7643 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7644 }
7645
7646 /* just report this */
7647 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7648 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7649 hdr0, hdr1);
7650
7651 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7652 }
7653
7654 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7655 /* informative only */
7656 dd_dev_info(dd, "8051 access to LCB blocked\n");
7657 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7658 }
7659 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7660 /* informative only */
7661 dd_dev_info(dd, "host access to LCB blocked\n");
7662 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7663 }
7664
7665 /* report any remaining errors */
7666 if (reg)
7667 dd_dev_info(dd, "DCC Error: %s\n",
7668 dcc_err_string(buf, sizeof(buf), reg));
7669
7670 if (lcl_reason == 0)
7671 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7672
7673 if (do_bounce) {
7674 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7675 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7676 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7677 }
7678}
7679
7680static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7681{
7682 char buf[96];
7683
7684 dd_dev_info(dd, "LCB Error: %s\n",
7685 lcb_err_string(buf, sizeof(buf), reg));
7686}
7687
7688/*
7689 * CCE block DC interrupt. Source is < 8.
7690 */
7691static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7692{
7693 const struct err_reg_info *eri = &dc_errs[source];
7694
7695 if (eri->handler) {
7696 interrupt_clear_down(dd, 0, eri);
7697 } else if (source == 3 /* dc_lbm_int */) {
7698 /*
7699 * This indicates that a parity error has occurred on the
7700 * address/control lines presented to the LBM. The error
7701 * is a single pulse, there is no associated error flag,
7702 * and it is non-maskable. This is because if a parity
7703		 * error occurs on the request, the request is dropped.
7704 * This should never occur, but it is nice to know if it
7705 * ever does.
7706 */
7707 dd_dev_err(dd, "Parity error in DC LBM block\n");
7708 } else {
7709 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7710 }
7711}
7712
7713/*
7714 * TX block send credit interrupt. Source is < 160.
7715 */
7716static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7717{
7718 sc_group_release_update(dd, source);
7719}
7720
7721/*
7722 * TX block SDMA interrupt. Source is < 48.
7723 *
7724 * SDMA interrupts are grouped by type:
7725 *
7726 * 0 - N-1 = SDma
7727 * N - 2N-1 = SDmaProgress
7728 * 2N - 3N-1 = SDmaIdle
7729 */
7730static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7731{
7732 /* what interrupt */
7733 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7734 /* which engine */
7735 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7736
7737#ifdef CONFIG_SDMA_VERBOSITY
7738 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7739 slashstrip(__FILE__), __LINE__, __func__);
7740 sdma_dumpstate(&dd->per_sdma[which]);
7741#endif
7742
7743 if (likely(what < 3 && which < dd->num_sdma)) {
7744 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7745 } else {
7746 /* should not happen */
7747 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7748 }
7749}
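
/*
 * For example, with 16 SDMA engines a source value of 20 decodes to
 * what = 1 (an SDmaProgress interrupt) and which = engine 4; the per-engine
 * handler is then invoked with that source expressed as a single status bit.
 */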
7750
7751/*
7752 * RX block receive available interrupt. Source is < 160.
7753 */
7754static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7755{
7756 struct hfi1_ctxtdata *rcd;
7757 char *err_detail;
7758
7759 if (likely(source < dd->num_rcv_contexts)) {
7760 rcd = dd->rcd[source];
7761 if (rcd) {
7762 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007763 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007764 else
7765 handle_user_interrupt(rcd);
7766 return; /* OK */
7767 }
7768 /* received an interrupt, but no rcd */
7769 err_detail = "dataless";
7770 } else {
7771 /* received an interrupt, but are not using that context */
7772 err_detail = "out of range";
7773 }
7774 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7775 err_detail, source);
7776}
7777
7778/*
7779 * RX block receive urgent interrupt. Source is < 160.
7780 */
7781static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7782{
7783 struct hfi1_ctxtdata *rcd;
7784 char *err_detail;
7785
7786 if (likely(source < dd->num_rcv_contexts)) {
7787 rcd = dd->rcd[source];
7788 if (rcd) {
7789 /* only pay attention to user urgent interrupts */
7790 if (source >= dd->first_user_ctxt)
7791 handle_user_interrupt(rcd);
7792 return; /* OK */
7793 }
7794 /* received an interrupt, but no rcd */
7795 err_detail = "dataless";
7796 } else {
7797 /* received an interrupt, but are not using that context */
7798 err_detail = "out of range";
7799 }
7800 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7801 err_detail, source);
7802}
7803
7804/*
7805 * Reserved range interrupt. Should not be called in normal operation.
7806 */
7807static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7808{
7809 char name[64];
7810
7811 dd_dev_err(dd, "unexpected %s interrupt\n",
7812 is_reserved_name(name, sizeof(name), source));
7813}
7814
7815static const struct is_table is_table[] = {
7816/* start end
7817 name func interrupt func */
7818{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7819 is_misc_err_name, is_misc_err_int },
7820{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7821 is_sdma_eng_err_name, is_sdma_eng_err_int },
7822{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7823 is_sendctxt_err_name, is_sendctxt_err_int },
7824{ IS_SDMA_START, IS_SDMA_END,
7825 is_sdma_eng_name, is_sdma_eng_int },
7826{ IS_VARIOUS_START, IS_VARIOUS_END,
7827 is_various_name, is_various_int },
7828{ IS_DC_START, IS_DC_END,
7829 is_dc_name, is_dc_int },
7830{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7831 is_rcv_avail_name, is_rcv_avail_int },
7832{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7833 is_rcv_urgent_name, is_rcv_urgent_int },
7834{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7835 is_send_credit_name, is_send_credit_int},
7836{ IS_RESERVED_START, IS_RESERVED_END,
7837 is_reserved_name, is_reserved_int},
7838};
7839
7840/*
7841 * Interrupt source interrupt - called when the given source has an interrupt.
7842 * Source is a bit index into an array of 64-bit integers.
7843 */
7844static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7845{
7846 const struct is_table *entry;
7847
7848 /* avoids a double compare by walking the table in-order */
7849 for (entry = &is_table[0]; entry->is_name; entry++) {
7850 if (source < entry->end) {
7851 trace_hfi1_interrupt(dd, entry, source);
7852 entry->is_int(dd, source - entry->start);
7853 return;
7854 }
7855 }
7856 /* fell off the end */
7857 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7858}
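
/*
 * Because the scan above only compares against entry->end, is_table[] must
 * stay sorted by ascending source range; each handler then receives its
 * source relative to the start of its own range.
 */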
7859
7860/*
7861 * General interrupt handler. This is able to correctly handle
7862 * all interrupts in case INTx is used.
7863 */
7864static irqreturn_t general_interrupt(int irq, void *data)
7865{
7866 struct hfi1_devdata *dd = data;
7867 u64 regs[CCE_NUM_INT_CSRS];
7868 u32 bit;
7869 int i;
7870
7871 this_cpu_inc(*dd->int_counter);
7872
7873 /* phase 1: scan and clear all handled interrupts */
7874 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7875 if (dd->gi_mask[i] == 0) {
7876 regs[i] = 0; /* used later */
7877 continue;
7878 }
7879 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7880 dd->gi_mask[i];
7881 /* only clear if anything is set */
7882 if (regs[i])
7883 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7884 }
7885
7886 /* phase 2: call the appropriate handler */
7887 for_each_set_bit(bit, (unsigned long *)&regs[0],
7888 CCE_NUM_INT_CSRS*64) {
7889 is_interrupt(dd, bit);
7890 }
7891
7892 return IRQ_HANDLED;
7893}
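
/*
 * The bit index used in phase 2 matches the global interrupt source
 * numbering: bit j of CCE_INT_STATUS CSR i is source i * 64 + j, which is
 * exactly what is_interrupt() looks up in is_table[].
 */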
7894
7895static irqreturn_t sdma_interrupt(int irq, void *data)
7896{
7897 struct sdma_engine *sde = data;
7898 struct hfi1_devdata *dd = sde->dd;
7899 u64 status;
7900
7901#ifdef CONFIG_SDMA_VERBOSITY
7902 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7903 slashstrip(__FILE__), __LINE__, __func__);
7904 sdma_dumpstate(sde);
7905#endif
7906
7907 this_cpu_inc(*dd->int_counter);
7908
7909 /* This read_csr is really bad in the hot path */
7910 status = read_csr(dd,
7911 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
7912 & sde->imask;
7913 if (likely(status)) {
7914 /* clear the interrupt(s) */
7915 write_csr(dd,
7916 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
7917 status);
7918
7919 /* handle the interrupt(s) */
7920 sdma_engine_interrupt(sde, status);
7921 } else
7922 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
7923 sde->this_idx);
7924
7925 return IRQ_HANDLED;
7926}
7927
7928/*
Dean Luickf4f30031c2015-10-26 10:28:44 -04007929 * Clear the receive interrupt, forcing the write and making sure
7930 * we have data from the chip, pushing everything in front of it
7931 * back to the host.
7932 */
7933static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
7934{
7935 struct hfi1_devdata *dd = rcd->dd;
7936 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
7937
7938 mmiowb(); /* make sure everything before is written */
7939 write_csr(dd, addr, rcd->imask);
7940 /* force the above write on the chip and get a value back */
7941 (void)read_csr(dd, addr);
7942}
7943
7944/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05007945void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007946{
7947 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
7948}
7949
7950/* return non-zero if a packet is present */
7951static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
7952{
7953 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
7954 return (rcd->seq_cnt ==
7955 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
7956
7957 /* else is RDMA rtail */
7958 return (rcd->head != get_rcvhdrtail(rcd));
7959}
7960
7961/*
7962 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
7963 * This routine will try to handle packets immediately (latency), but if
7964 * it finds too many, it will invoke the thread handler (bandwidth). The
7965 * chip receive interrupt is *not* cleared down until this or the thread (if
7966 * invoked) is finished. The intent is to avoid extra interrupts while we
7967 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04007968 */
7969static irqreturn_t receive_context_interrupt(int irq, void *data)
7970{
7971 struct hfi1_ctxtdata *rcd = data;
7972 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04007973 int disposition;
7974 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007975
7976 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
7977 this_cpu_inc(*dd->int_counter);
7978
Dean Luickf4f30031c2015-10-26 10:28:44 -04007979 /* receive interrupt remains blocked while processing packets */
7980 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007981
Dean Luickf4f30031c2015-10-26 10:28:44 -04007982 /*
7983 * Too many packets were seen while processing packets in this
7984 * IRQ handler. Invoke the handler thread. The receive interrupt
7985 * remains blocked.
7986 */
7987 if (disposition == RCV_PKT_LIMIT)
7988 return IRQ_WAKE_THREAD;
7989
7990 /*
7991 * The packet processor detected no more packets. Clear the receive
7992	 * interrupt and recheck for a packet that may have arrived
7993 * after the previous check and interrupt clear. If a packet arrived,
7994 * force another interrupt.
7995 */
7996 clear_recv_intr(rcd);
7997 present = check_packet_present(rcd);
7998 if (present)
7999 force_recv_intr(rcd);
8000
8001 return IRQ_HANDLED;
8002}
8003
8004/*
8005 * Receive packet thread handler. This expects to be invoked with the
8006 * receive interrupt still blocked.
8007 */
8008static irqreturn_t receive_context_thread(int irq, void *data)
8009{
8010 struct hfi1_ctxtdata *rcd = data;
8011 int present;
8012
8013 /* receive interrupt is still blocked from the IRQ handler */
8014 (void)rcd->do_interrupt(rcd, 1);
8015
8016 /*
8017 * The packet processor will only return if it detected no more
8018 * packets. Hold IRQs here so we can safely clear the interrupt and
8019 * recheck for a packet that may have arrived after the previous
8020 * check and the interrupt clear. If a packet arrived, force another
8021 * interrupt.
8022 */
8023 local_irq_disable();
8024 clear_recv_intr(rcd);
8025 present = check_packet_present(rcd);
8026 if (present)
8027 force_recv_intr(rcd);
8028 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008029
8030 return IRQ_HANDLED;
8031}
8032
8033/* ========================================================================= */
8034
8035u32 read_physical_state(struct hfi1_devdata *dd)
8036{
8037 u64 reg;
8038
8039 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8040 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8041 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8042}
8043
Jim Snowfb9036d2016-01-11 18:32:21 -05008044u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008045{
8046 u64 reg;
8047
8048 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8049 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8050 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8051}
8052
8053static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8054{
8055 u64 reg;
8056
8057 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8058 /* clear current state, set new state */
8059 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8060 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8061 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8062}
8063
8064/*
 8065 * Use the 8051 to read an LCB CSR.
8066 */
8067static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8068{
8069 u32 regno;
8070 int ret;
8071
8072 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8073 if (acquire_lcb_access(dd, 0) == 0) {
8074 *data = read_csr(dd, addr);
8075 release_lcb_access(dd, 0);
8076 return 0;
8077 }
8078 return -EBUSY;
8079 }
8080
8081 /* register is an index of LCB registers: (offset - base) / 8 */
8082 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8083 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8084 if (ret != HCMD_SUCCESS)
8085 return -EBUSY;
8086 return 0;
8087}
8088
8089/*
8090 * Read an LCB CSR. Access may not be in host control, so check.
8091 * Return 0 on success, -EBUSY on failure.
8092 */
8093int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8094{
8095 struct hfi1_pportdata *ppd = dd->pport;
8096
8097 /* if up, go through the 8051 for the value */
8098 if (ppd->host_link_state & HLS_UP)
8099 return read_lcb_via_8051(dd, addr, data);
8100 /* if going up or down, no access */
8101 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8102 return -EBUSY;
8103 /* otherwise, host has access */
8104 *data = read_csr(dd, addr);
8105 return 0;
8106}
8107
8108/*
 8109 * Use the 8051 to write an LCB CSR.
8110 */
8111static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8112{
Dean Luick3bf40d62015-11-06 20:07:04 -05008113 u32 regno;
8114 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008115
Dean Luick3bf40d62015-11-06 20:07:04 -05008116 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8117 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8118 if (acquire_lcb_access(dd, 0) == 0) {
8119 write_csr(dd, addr, data);
8120 release_lcb_access(dd, 0);
8121 return 0;
8122 }
8123 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008124 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008125
8126 /* register is an index of LCB registers: (offset - base) / 8 */
8127 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8128 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8129 if (ret != HCMD_SUCCESS)
8130 return -EBUSY;
8131 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008132}
8133
8134/*
8135 * Write an LCB CSR. Access may not be in host control, so check.
8136 * Return 0 on success, -EBUSY on failure.
8137 */
8138int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8139{
8140 struct hfi1_pportdata *ppd = dd->pport;
8141
8142 /* if up, go through the 8051 for the value */
8143 if (ppd->host_link_state & HLS_UP)
8144 return write_lcb_via_8051(dd, addr, data);
8145 /* if going up or down, no access */
8146 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8147 return -EBUSY;
8148 /* otherwise, host has access */
8149 write_csr(dd, addr, data);
8150 return 0;
8151}
8152
8153/*
8154 * Returns:
8155 * < 0 = Linux error, not able to get access
8156 * > 0 = 8051 command RETURN_CODE
8157 */
8158static int do_8051_command(
8159 struct hfi1_devdata *dd,
8160 u32 type,
8161 u64 in_data,
8162 u64 *out_data)
8163{
8164 u64 reg, completed;
8165 int return_code;
8166 unsigned long flags;
8167 unsigned long timeout;
8168
8169 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8170
8171 /*
8172 * Alternative to holding the lock for a long time:
8173 * - keep busy wait - have other users bounce off
8174 */
8175 spin_lock_irqsave(&dd->dc8051_lock, flags);
8176
8177 /* We can't send any commands to the 8051 if it's in reset */
8178 if (dd->dc_shutdown) {
8179 return_code = -ENODEV;
8180 goto fail;
8181 }
8182
8183 /*
8184 * If an 8051 host command timed out previously, then the 8051 is
8185 * stuck.
8186 *
8187 * On first timeout, attempt to reset and restart the entire DC
8188 * block (including 8051). (Is this too big of a hammer?)
8189 *
8190 * If the 8051 times out a second time, the reset did not bring it
8191 * back to healthy life. In that case, fail any subsequent commands.
8192 */
8193 if (dd->dc8051_timed_out) {
8194 if (dd->dc8051_timed_out > 1) {
8195 dd_dev_err(dd,
8196 "Previous 8051 host command timed out, skipping command %u\n",
8197 type);
8198 return_code = -ENXIO;
8199 goto fail;
8200 }
8201 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8202 dc_shutdown(dd);
8203 dc_start(dd);
8204 spin_lock_irqsave(&dd->dc8051_lock, flags);
8205 }
8206
8207 /*
8208 * If there is no timeout, then the 8051 command interface is
8209 * waiting for a command.
8210 */
8211
8212 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008213	 * When writing an LCB CSR, out_data contains the full value
 8214	 * to be written, while in_data contains the relative LCB
 8215	 * address in 7:0. Do the work here, rather than the caller,
 8216	 * of distributing the write data to where it needs to go:
8217 *
8218 * Write data
8219 * 39:00 -> in_data[47:8]
8220 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8221 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8222 */
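	/*
	 * Worked example (value illustrative only): for a 64-bit write value
	 * of 0xAABBCCDDEEFF1122, bits 39:0 (0xDDEEFF1122) are packed into
	 * in_data[47:8] above the LCB register index in in_data[7:0], bits
	 * 47:40 (0xCC) land in DC8051_CFG_EXT_DEV_0.RETURN_CODE, and bits
	 * 63:48 (0xAABB) land in DC8051_CFG_EXT_DEV_0.RSP_DATA.
	 */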
8223 if (type == HCMD_WRITE_LCB_CSR) {
8224 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8225 reg = ((((*out_data) >> 40) & 0xff) <<
8226 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8227 | ((((*out_data) >> 48) & 0xffff) <<
8228 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8229 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8230 }
8231
8232 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008233 * Do two writes: the first to stabilize the type and req_data, the
8234 * second to activate.
8235 */
8236 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8237 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8238 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8239 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8240 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8241 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8242 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8243
8244 /* wait for completion, alternate: interrupt */
8245 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8246 while (1) {
8247 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8248 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8249 if (completed)
8250 break;
8251 if (time_after(jiffies, timeout)) {
8252 dd->dc8051_timed_out++;
8253 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8254 if (out_data)
8255 *out_data = 0;
8256 return_code = -ETIMEDOUT;
8257 goto fail;
8258 }
8259 udelay(2);
8260 }
8261
8262 if (out_data) {
8263 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8264 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8265 if (type == HCMD_READ_LCB_CSR) {
8266 /* top 16 bits are in a different register */
8267 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8268 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8269 << (48
8270 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8271 }
8272 }
8273 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8274 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8275 dd->dc8051_timed_out = 0;
8276 /*
8277 * Clear command for next user.
8278 */
8279 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8280
8281fail:
8282 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8283
8284 return return_code;
8285}
8286
8287static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8288{
8289 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8290}
8291
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008292int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8293 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008294{
8295 u64 data;
8296 int ret;
8297
8298 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8299 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8300 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8301 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8302 if (ret != HCMD_SUCCESS) {
8303 dd_dev_err(dd,
8304 "load 8051 config: field id %d, lane %d, err %d\n",
8305 (int)field_id, (int)lane_id, ret);
8306 }
8307 return ret;
8308}
8309
8310/*
8311 * Read the 8051 firmware "registers". Use the RAM directly. Always
8312 * set the result, even on error.
8313 * Return 0 on success, -errno on failure
8314 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008315int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8316 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008317{
8318 u64 big_data;
8319 u32 addr;
8320 int ret;
8321
8322 /* address start depends on the lane_id */
8323 if (lane_id < 4)
8324 addr = (4 * NUM_GENERAL_FIELDS)
8325 + (lane_id * 4 * NUM_LANE_FIELDS);
8326 else
8327 addr = 0;
8328 addr += field_id * 4;
8329
8330 /* read is in 8-byte chunks, hardware will truncate the address down */
8331 ret = read_8051_data(dd, addr, 8, &big_data);
8332
8333 if (ret == 0) {
8334 /* extract the 4 bytes we want */
8335 if (addr & 0x4)
8336 *result = (u32)(big_data >> 32);
8337 else
8338 *result = (u32)big_data;
8339 } else {
8340 *result = 0;
8341 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8342 __func__, lane_id, field_id);
8343 }
8344
8345 return ret;
8346}
8347
8348static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8349 u8 continuous)
8350{
8351 u32 frame;
8352
8353 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8354 | power_management << POWER_MANAGEMENT_SHIFT;
8355 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8356 GENERAL_CONFIG, frame);
8357}
8358
8359static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8360 u16 vl15buf, u8 crc_sizes)
8361{
8362 u32 frame;
8363
8364 frame = (u32)vau << VAU_SHIFT
8365 | (u32)z << Z_SHIFT
8366 | (u32)vcu << VCU_SHIFT
8367 | (u32)vl15buf << VL15BUF_SHIFT
8368 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8369 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8370 GENERAL_CONFIG, frame);
8371}
8372
8373static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8374 u8 *flag_bits, u16 *link_widths)
8375{
8376 u32 frame;
8377
8378 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8379 &frame);
8380 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8381 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8382 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8383}
8384
8385static int write_vc_local_link_width(struct hfi1_devdata *dd,
8386 u8 misc_bits,
8387 u8 flag_bits,
8388 u16 link_widths)
8389{
8390 u32 frame;
8391
8392 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8393 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8394 | (u32)link_widths << LINK_WIDTH_SHIFT;
8395 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8396 frame);
8397}
8398
8399static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8400 u8 device_rev)
8401{
8402 u32 frame;
8403
8404 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8405 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8406 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8407}
8408
8409static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8410 u8 *device_rev)
8411{
8412 u32 frame;
8413
8414 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8415 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8416 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8417 & REMOTE_DEVICE_REV_MASK;
8418}
8419
8420void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8421{
8422 u32 frame;
8423
8424 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8425 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8426 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8427}
8428
8429static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8430 u8 *continuous)
8431{
8432 u32 frame;
8433
8434 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8435 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8436 & POWER_MANAGEMENT_MASK;
8437 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8438 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8439}
8440
8441static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8442 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8443{
8444 u32 frame;
8445
8446 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8447 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8448 *z = (frame >> Z_SHIFT) & Z_MASK;
8449 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8450 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8451 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8452}
8453
8454static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8455 u8 *remote_tx_rate,
8456 u16 *link_widths)
8457{
8458 u32 frame;
8459
8460 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8461 &frame);
8462 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8463 & REMOTE_TX_RATE_MASK;
8464 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8465}
8466
8467static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8468{
8469 u32 frame;
8470
8471 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8472 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8473}
8474
8475static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8476{
8477 u32 frame;
8478
8479 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8480 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8481}
8482
8483static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8484{
8485 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8486}
8487
8488static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8489{
8490 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8491}
8492
8493void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8494{
8495 u32 frame;
8496 int ret;
8497
8498 *link_quality = 0;
8499 if (dd->pport->host_link_state & HLS_UP) {
8500 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8501 &frame);
8502 if (ret == 0)
8503 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8504 & LINK_QUALITY_MASK;
8505 }
8506}
8507
8508static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8509{
8510 u32 frame;
8511
8512 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8513 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8514}
8515
8516static int read_tx_settings(struct hfi1_devdata *dd,
8517 u8 *enable_lane_tx,
8518 u8 *tx_polarity_inversion,
8519 u8 *rx_polarity_inversion,
8520 u8 *max_rate)
8521{
8522 u32 frame;
8523 int ret;
8524
8525 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8526 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8527 & ENABLE_LANE_TX_MASK;
8528 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8529 & TX_POLARITY_INVERSION_MASK;
8530 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8531 & RX_POLARITY_INVERSION_MASK;
8532 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8533 return ret;
8534}
8535
8536static int write_tx_settings(struct hfi1_devdata *dd,
8537 u8 enable_lane_tx,
8538 u8 tx_polarity_inversion,
8539 u8 rx_polarity_inversion,
8540 u8 max_rate)
8541{
8542 u32 frame;
8543
8544 /* no need to mask, all variable sizes match field widths */
8545 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8546 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8547 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8548 | max_rate << MAX_RATE_SHIFT;
8549 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8550}
8551
8552static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8553{
8554 u32 frame, version, prod_id;
8555 int ret, lane;
8556
8557 /* 4 lanes */
8558 for (lane = 0; lane < 4; lane++) {
8559 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8560 if (ret) {
8561 dd_dev_err(
8562 dd,
8563 "Unable to read lane %d firmware details\n",
8564 lane);
8565 continue;
8566 }
8567 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8568 & SPICO_ROM_VERSION_MASK;
8569 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8570 & SPICO_ROM_PROD_ID_MASK;
8571 dd_dev_info(dd,
8572 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8573 lane, version, prod_id);
8574 }
8575}
8576
8577/*
8578 * Read an idle LCB message.
8579 *
8580 * Returns 0 on success, -EINVAL on error
8581 */
8582static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8583{
8584 int ret;
8585
8586 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8587 type, data_out);
8588 if (ret != HCMD_SUCCESS) {
8589 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8590 (u32)type, ret);
8591 return -EINVAL;
8592 }
8593 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8594 /* return only the payload as we already know the type */
8595 *data_out >>= IDLE_PAYLOAD_SHIFT;
8596 return 0;
8597}
8598
8599/*
8600 * Read an idle SMA message. To be done in response to a notification from
8601 * the 8051.
8602 *
8603 * Returns 0 on success, -EINVAL on error
8604 */
8605static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8606{
8607 return read_idle_message(dd,
8608 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8609}
8610
8611/*
8612 * Send an idle LCB message.
8613 *
8614 * Returns 0 on success, -EINVAL on error
8615 */
8616static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8617{
8618 int ret;
8619
8620 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8621 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8622 if (ret != HCMD_SUCCESS) {
8623 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8624 data, ret);
8625 return -EINVAL;
8626 }
8627 return 0;
8628}
8629
8630/*
8631 * Send an idle SMA message.
8632 *
8633 * Returns 0 on success, -EINVAL on error
8634 */
8635int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8636{
8637 u64 data;
8638
8639 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8640 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8641 return send_idle_message(dd, data);
8642}
8643
8644/*
8645 * Initialize the LCB then do a quick link up. This may or may not be
8646 * in loopback.
8647 *
8648 * return 0 on success, -errno on error
8649 */
8650static int do_quick_linkup(struct hfi1_devdata *dd)
8651{
8652 u64 reg;
8653 unsigned long timeout;
8654 int ret;
8655
8656 lcb_shutdown(dd, 0);
8657
8658 if (loopback) {
8659 /* LCB_CFG_LOOPBACK.VAL = 2 */
8660 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8661 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8662 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8663 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8664 }
8665
8666 /* start the LCBs */
8667 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8668 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8669
8670 /* simulator only loopback steps */
8671 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8672 /* LCB_CFG_RUN.EN = 1 */
8673 write_csr(dd, DC_LCB_CFG_RUN,
8674 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8675
8676 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8677 timeout = jiffies + msecs_to_jiffies(10);
8678 while (1) {
8679 reg = read_csr(dd,
8680 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8681 if (reg)
8682 break;
8683 if (time_after(jiffies, timeout)) {
8684 dd_dev_err(dd,
8685 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8686 return -ETIMEDOUT;
8687 }
8688 udelay(2);
8689 }
8690
8691 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8692 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8693 }
8694
8695 if (!loopback) {
8696 /*
8697 * When doing quick linkup and not in loopback, both
8698 * sides must be done with LCB set-up before either
8699 * starts the quick linkup. Put a delay here so that
8700 * both sides can be started and have a chance to be
8701 * done with LCB set up before resuming.
8702 */
8703 dd_dev_err(dd,
8704 "Pausing for peer to be finished with LCB set up\n");
8705 msleep(5000);
8706 dd_dev_err(dd,
8707 "Continuing with quick linkup\n");
8708 }
8709
8710 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8711 set_8051_lcb_access(dd);
8712
8713 /*
8714 * State "quick" LinkUp request sets the physical link state to
8715 * LinkUp without a verify capability sequence.
8716 * This state is in simulator v37 and later.
8717 */
8718 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8719 if (ret != HCMD_SUCCESS) {
8720 dd_dev_err(dd,
8721 "%s: set physical link state to quick LinkUp failed with return %d\n",
8722 __func__, ret);
8723
8724 set_host_lcb_access(dd);
8725 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8726
8727 if (ret >= 0)
8728 ret = -EINVAL;
8729 return ret;
8730 }
8731
8732 return 0; /* success */
8733}
8734
8735/*
8736 * Set the SerDes to internal loopback mode.
8737 * Returns 0 on success, -errno on error.
8738 */
8739static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8740{
8741 int ret;
8742
8743 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8744 if (ret == HCMD_SUCCESS)
8745 return 0;
8746 dd_dev_err(dd,
8747 "Set physical link state to SerDes Loopback failed with return %d\n",
8748 ret);
8749 if (ret >= 0)
8750 ret = -EINVAL;
8751 return ret;
8752}
8753
8754/*
8755 * Do all special steps to set up loopback.
8756 */
8757static int init_loopback(struct hfi1_devdata *dd)
8758{
8759 dd_dev_info(dd, "Entering loopback mode\n");
8760
8761 /* all loopbacks should disable self GUID check */
8762 write_csr(dd, DC_DC8051_CFG_MODE,
8763 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8764
8765 /*
8766 * The simulator has only one loopback option - LCB. Switch
8767 * to that option, which includes quick link up.
8768 *
8769 * Accept all valid loopback values.
8770 */
8771 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8772 && (loopback == LOOPBACK_SERDES
8773 || loopback == LOOPBACK_LCB
8774 || loopback == LOOPBACK_CABLE)) {
8775 loopback = LOOPBACK_LCB;
8776 quick_linkup = 1;
8777 return 0;
8778 }
8779
8780 /* handle serdes loopback */
8781 if (loopback == LOOPBACK_SERDES) {
 8782	/* internal serdes loopback needs quick linkup on RTL */
8783 if (dd->icode == ICODE_RTL_SILICON)
8784 quick_linkup = 1;
8785 return set_serdes_loopback_mode(dd);
8786 }
8787
8788 /* LCB loopback - handled at poll time */
8789 if (loopback == LOOPBACK_LCB) {
8790 quick_linkup = 1; /* LCB is always quick linkup */
8791
8792 /* not supported in emulation due to emulation RTL changes */
8793 if (dd->icode == ICODE_FPGA_EMULATION) {
8794 dd_dev_err(dd,
8795 "LCB loopback not supported in emulation\n");
8796 return -EINVAL;
8797 }
8798 return 0;
8799 }
8800
8801 /* external cable loopback requires no extra steps */
8802 if (loopback == LOOPBACK_CABLE)
8803 return 0;
8804
8805 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8806 return -EINVAL;
8807}
8808
8809/*
8810 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8811 * used in the Verify Capability link width attribute.
8812 */
8813static u16 opa_to_vc_link_widths(u16 opa_widths)
8814{
8815 int i;
8816 u16 result = 0;
8817
8818 static const struct link_bits {
8819 u16 from;
8820 u16 to;
8821 } opa_link_xlate[] = {
8822 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8823 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8824 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8825 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8826 };
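	/*
	 * For example, an FM value of OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X
	 * translates to 0b1001: bit 0 for 1X and bit 3 for 4X.
	 */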
8827
8828 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8829 if (opa_widths & opa_link_xlate[i].from)
8830 result |= opa_link_xlate[i].to;
8831 }
8832 return result;
8833}
8834
8835/*
8836 * Set link attributes before moving to polling.
8837 */
8838static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8839{
8840 struct hfi1_devdata *dd = ppd->dd;
8841 u8 enable_lane_tx;
8842 u8 tx_polarity_inversion;
8843 u8 rx_polarity_inversion;
8844 int ret;
8845
8846 /* reset our fabric serdes to clear any lingering problems */
8847 fabric_serdes_reset(dd);
8848
8849 /* set the local tx rate - need to read-modify-write */
8850 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8851 &rx_polarity_inversion, &ppd->local_tx_rate);
8852 if (ret)
8853 goto set_local_link_attributes_fail;
8854
8855 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8856 /* set the tx rate to the fastest enabled */
8857 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8858 ppd->local_tx_rate = 1;
8859 else
8860 ppd->local_tx_rate = 0;
8861 } else {
8862 /* set the tx rate to all enabled */
8863 ppd->local_tx_rate = 0;
8864 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8865 ppd->local_tx_rate |= 2;
8866 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8867 ppd->local_tx_rate |= 1;
8868 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008869
8870 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008871 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8872 rx_polarity_inversion, ppd->local_tx_rate);
8873 if (ret != HCMD_SUCCESS)
8874 goto set_local_link_attributes_fail;
8875
8876 /*
8877 * DC supports continuous updates.
8878 */
8879 ret = write_vc_local_phy(dd, 0 /* no power management */,
8880 1 /* continuous updates */);
8881 if (ret != HCMD_SUCCESS)
8882 goto set_local_link_attributes_fail;
8883
8884 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8885 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8886 ppd->port_crc_mode_enabled);
8887 if (ret != HCMD_SUCCESS)
8888 goto set_local_link_attributes_fail;
8889
8890 ret = write_vc_local_link_width(dd, 0, 0,
8891 opa_to_vc_link_widths(ppd->link_width_enabled));
8892 if (ret != HCMD_SUCCESS)
8893 goto set_local_link_attributes_fail;
8894
8895 /* let peer know who we are */
8896 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8897 if (ret == HCMD_SUCCESS)
8898 return 0;
8899
8900set_local_link_attributes_fail:
8901 dd_dev_err(dd,
8902 "Failed to set local link attributes, return 0x%x\n",
8903 ret);
8904 return ret;
8905}
8906
8907/*
8908 * Call this to start the link. Schedule a retry if the cable is not
8909 * present or if unable to start polling. Do not do anything if the
8910 * link is disabled. Returns 0 if link is disabled or moved to polling
8911 */
8912int start_link(struct hfi1_pportdata *ppd)
8913{
8914 if (!ppd->link_enabled) {
8915 dd_dev_info(ppd->dd,
8916 "%s: stopping link start because link is disabled\n",
8917 __func__);
8918 return 0;
8919 }
8920 if (!ppd->driver_link_ready) {
8921 dd_dev_info(ppd->dd,
8922 "%s: stopping link start because driver is not ready\n",
8923 __func__);
8924 return 0;
8925 }
8926
8927 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
8928 loopback == LOOPBACK_LCB ||
8929 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8930 return set_link_state(ppd, HLS_DN_POLL);
8931
8932 dd_dev_info(ppd->dd,
8933 "%s: stopping link start because no cable is present\n",
8934 __func__);
8935 return -EAGAIN;
8936}
8937
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008938static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
8939{
8940 struct hfi1_devdata *dd = ppd->dd;
8941 u64 mask;
8942 unsigned long timeout;
8943
8944 /*
8945 * Check for QSFP interrupt for t_init (SFF 8679)
8946 */
8947 timeout = jiffies + msecs_to_jiffies(2000);
8948 while (1) {
8949 mask = read_csr(dd, dd->hfi1_id ?
8950 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
8951 if (!(mask & QSFP_HFI0_INT_N)) {
8952 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
8953 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
8954 break;
8955 }
8956 if (time_after(jiffies, timeout)) {
8957 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
8958 __func__);
8959 break;
8960 }
8961 udelay(2);
8962 }
8963}
8964
8965static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
8966{
8967 struct hfi1_devdata *dd = ppd->dd;
8968 u64 mask;
8969
8970 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
8971 if (enable)
8972 mask |= (u64)QSFP_HFI0_INT_N;
8973 else
8974 mask &= ~(u64)QSFP_HFI0_INT_N;
8975 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
8976}
8977
8978void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008979{
8980 struct hfi1_devdata *dd = ppd->dd;
8981 u64 mask, qsfp_mask;
8982
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008983 /* Disable INT_N from triggering QSFP interrupts */
8984 set_qsfp_int_n(ppd, 0);
8985
8986 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008987 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008988 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008989 qsfp_mask |= mask;
8990 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008991 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008992
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008993 qsfp_mask = read_csr(dd, dd->hfi1_id ?
8994 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008995 qsfp_mask &= ~mask;
8996 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008997 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008998
8999 udelay(10);
9000
9001 qsfp_mask |= mask;
9002 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009003 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9004
9005 wait_for_qsfp_init(ppd);
9006
9007 /*
9008 * Allow INT_N to trigger the QSFP interrupt to watch
9009 * for alarms and warnings
9010 */
9011 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009012}
9013
9014static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9015 u8 *qsfp_interrupt_status)
9016{
9017 struct hfi1_devdata *dd = ppd->dd;
9018
9019 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9020 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9021 dd_dev_info(dd,
9022 "%s: QSFP cable on fire\n",
9023 __func__);
9024
9025 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9026 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9027 dd_dev_info(dd,
9028 "%s: QSFP cable temperature too low\n",
9029 __func__);
9030
9031 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9032 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9033 dd_dev_info(dd,
9034 "%s: QSFP supply voltage too high\n",
9035 __func__);
9036
9037 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9038 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9039 dd_dev_info(dd,
9040 "%s: QSFP supply voltage too low\n",
9041 __func__);
9042
9043 /* Byte 2 is vendor specific */
9044
9045 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9046 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9047 dd_dev_info(dd,
9048 "%s: Cable RX channel 1/2 power too high\n",
9049 __func__);
9050
9051 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9052 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9053 dd_dev_info(dd,
9054 "%s: Cable RX channel 1/2 power too low\n",
9055 __func__);
9056
9057 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9058 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9059 dd_dev_info(dd,
9060 "%s: Cable RX channel 3/4 power too high\n",
9061 __func__);
9062
9063 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9064 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9065 dd_dev_info(dd,
9066 "%s: Cable RX channel 3/4 power too low\n",
9067 __func__);
9068
9069 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9070 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9071 dd_dev_info(dd,
9072 "%s: Cable TX channel 1/2 bias too high\n",
9073 __func__);
9074
9075 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9076 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9077 dd_dev_info(dd,
9078 "%s: Cable TX channel 1/2 bias too low\n",
9079 __func__);
9080
9081 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9082 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9083 dd_dev_info(dd,
9084 "%s: Cable TX channel 3/4 bias too high\n",
9085 __func__);
9086
9087 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9088 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9089 dd_dev_info(dd,
9090 "%s: Cable TX channel 3/4 bias too low\n",
9091 __func__);
9092
9093 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9094 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9095 dd_dev_info(dd,
9096 "%s: Cable TX channel 1/2 power too high\n",
9097 __func__);
9098
9099 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9100 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9101 dd_dev_info(dd,
9102 "%s: Cable TX channel 1/2 power too low\n",
9103 __func__);
9104
9105 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9106 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9107 dd_dev_info(dd,
9108 "%s: Cable TX channel 3/4 power too high\n",
9109 __func__);
9110
9111 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9112 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9113 dd_dev_info(dd,
9114 "%s: Cable TX channel 3/4 power too low\n",
9115 __func__);
9116
9117 /* Bytes 9-10 and 11-12 are reserved */
9118 /* Bytes 13-15 are vendor specific */
9119
9120 return 0;
9121}
9122
Mike Marciniszyn77241052015-07-30 15:17:43 -04009123/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009124void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009125{
9126 struct qsfp_data *qd;
9127 struct hfi1_pportdata *ppd;
9128 struct hfi1_devdata *dd;
9129
9130 qd = container_of(work, struct qsfp_data, qsfp_work);
9131 ppd = qd->ppd;
9132 dd = ppd->dd;
9133
9134 /* Sanity check */
9135 if (!qsfp_mod_present(ppd))
9136 return;
9137
9138 /*
 9139	 * Turn DC back on after the cable has been
9140 * re-inserted. Up until now, the DC has been in
9141 * reset to save power.
9142 */
9143 dc_start(dd);
9144
9145 if (qd->cache_refresh_required) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009146
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009147 set_qsfp_int_n(ppd, 0);
9148
9149 wait_for_qsfp_init(ppd);
9150
9151 /*
9152 * Allow INT_N to trigger the QSFP interrupt to watch
9153 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009154 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009155 set_qsfp_int_n(ppd, 1);
9156
9157 tune_serdes(ppd);
9158
9159 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009160 }
9161
9162 if (qd->check_interrupt_flags) {
9163 u8 qsfp_interrupt_status[16] = {0,};
9164
9165 if (qsfp_read(ppd, dd->hfi1_id, 6,
9166 &qsfp_interrupt_status[0], 16) != 16) {
9167 dd_dev_info(dd,
9168 "%s: Failed to read status of QSFP module\n",
9169 __func__);
9170 } else {
9171 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009172
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009173 handle_qsfp_error_conditions(
9174 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009175 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9176 ppd->qsfp_info.check_interrupt_flags = 0;
9177 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9178 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009179 }
9180 }
9181}
9182
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009183static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009184{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009185 struct hfi1_pportdata *ppd = dd->pport;
9186 u64 qsfp_mask, cce_int_mask;
9187 const int qsfp1_int_smask = QSFP1_INT % 64;
9188 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009189
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009190 /*
9191 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9192 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9193 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9194 * the index of the appropriate CSR in the CCEIntMask CSR array
9195 */
9196 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9197 (8 * (QSFP1_INT / 64)));
9198 if (dd->hfi1_id) {
9199 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9200 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9201 cce_int_mask);
9202 } else {
9203 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9204 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9205 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009206 }
9207
Mike Marciniszyn77241052015-07-30 15:17:43 -04009208 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9209 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009210 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9211 qsfp_mask);
9212 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9213 qsfp_mask);
9214
9215 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009216
9217 /* Handle active low nature of INT_N and MODPRST_N pins */
9218 if (qsfp_mod_present(ppd))
9219 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9220 write_csr(dd,
9221 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9222 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009223}
9224
Dean Luickbbdeb332015-12-01 15:38:15 -05009225/*
9226 * Do a one-time initialize of the LCB block.
9227 */
9228static void init_lcb(struct hfi1_devdata *dd)
9229{
9230 /* the DC has been reset earlier in the driver load */
9231
9232 /* set LCB for cclk loopback on the port */
9233 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9234 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9235 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9236 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9237 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9238 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9239 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9240}
9241
Mike Marciniszyn77241052015-07-30 15:17:43 -04009242int bringup_serdes(struct hfi1_pportdata *ppd)
9243{
9244 struct hfi1_devdata *dd = ppd->dd;
9245 u64 guid;
9246 int ret;
9247
9248 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9249 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9250
9251 guid = ppd->guid;
9252 if (!guid) {
9253 if (dd->base_guid)
9254 guid = dd->base_guid + ppd->port - 1;
9255 ppd->guid = guid;
9256 }
9257
Mike Marciniszyn77241052015-07-30 15:17:43 -04009258 /* Set linkinit_reason on power up per OPA spec */
9259 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9260
Dean Luickbbdeb332015-12-01 15:38:15 -05009261 /* one-time init of the LCB */
9262 init_lcb(dd);
9263
Mike Marciniszyn77241052015-07-30 15:17:43 -04009264 if (loopback) {
9265 ret = init_loopback(dd);
9266 if (ret < 0)
9267 return ret;
9268 }
9269
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009270	/*
 9271	 * Tune the SerDes to a ballpark setting for optimal signal and
 9272	 * bit error rate. This needs to be done before starting the link.
 9273	 */
9274 tune_serdes(ppd);
9275
Mike Marciniszyn77241052015-07-30 15:17:43 -04009276 return start_link(ppd);
9277}
9278
9279void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9280{
9281 struct hfi1_devdata *dd = ppd->dd;
9282
9283 /*
 9284	 * Shut down the link and keep it down. First clear the flag saying
 9285	 * the driver wants to allow the link to be up (driver_link_ready).
9286 * Then make sure the link is not automatically restarted
9287 * (link_enabled). Cancel any pending restart. And finally
9288 * go offline.
9289 */
9290 ppd->driver_link_ready = 0;
9291 ppd->link_enabled = 0;
9292
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009293 ppd->offline_disabled_reason =
9294 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009295 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9296 OPA_LINKDOWN_REASON_SMA_DISABLED);
9297 set_link_state(ppd, HLS_DN_OFFLINE);
9298
9299 /* disable the port */
9300 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9301}
9302
9303static inline int init_cpu_counters(struct hfi1_devdata *dd)
9304{
9305 struct hfi1_pportdata *ppd;
9306 int i;
9307
9308 ppd = (struct hfi1_pportdata *)(dd + 1);
9309 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009310 ppd->ibport_data.rvp.rc_acks = NULL;
9311 ppd->ibport_data.rvp.rc_qacks = NULL;
9312 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9313 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9314 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9315 if (!ppd->ibport_data.rvp.rc_acks ||
9316 !ppd->ibport_data.rvp.rc_delayed_comp ||
9317 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009318 return -ENOMEM;
9319 }
9320
9321 return 0;
9322}
9323
9324static const char * const pt_names[] = {
9325 "expected",
9326 "eager",
9327 "invalid"
9328};
9329
9330static const char *pt_name(u32 type)
9331{
9332 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9333}
9334
9335/*
9336 * index is the index into the receive array
9337 */
9338void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9339 u32 type, unsigned long pa, u16 order)
9340{
9341 u64 reg;
9342 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9343 (dd->kregbase + RCV_ARRAY));
9344
9345 if (!(dd->flags & HFI1_PRESENT))
9346 goto done;
9347
9348 if (type == PT_INVALID) {
9349 pa = 0;
9350 } else if (type > PT_INVALID) {
9351 dd_dev_err(dd,
9352 "unexpected receive array type %u for index %u, not handled\n",
9353 type, index);
9354 goto done;
9355 }
9356
9357 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9358 pt_name(type), index, pa, (unsigned long)order);
9359
9360#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9361 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9362 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9363 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9364 << RCV_ARRAY_RT_ADDR_SHIFT;
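	/*
	 * Worked example (address illustrative): pa = 0x12345000 with
	 * order 4 programs 0x12345 (pa >> 12) into the ADDR field and 4
	 * into BUF_SIZE, with the write-enable bit set.
	 */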
9365 writeq(reg, base + (index * 8));
9366
9367 if (type == PT_EAGER)
9368 /*
9369 * Eager entries are written one-by-one so we have to push them
9370 * after we write the entry.
9371 */
9372 flush_wc();
9373done:
9374 return;
9375}
9376
9377void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9378{
9379 struct hfi1_devdata *dd = rcd->dd;
9380 u32 i;
9381
9382 /* this could be optimized */
9383 for (i = rcd->eager_base; i < rcd->eager_base +
9384 rcd->egrbufs.alloced; i++)
9385 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9386
9387 for (i = rcd->expected_base;
9388 i < rcd->expected_base + rcd->expected_count; i++)
9389 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9390}
9391
9392int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9393 struct hfi1_ctxt_info *kinfo)
9394{
9395 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9396 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9397 return 0;
9398}
9399
9400struct hfi1_message_header *hfi1_get_msgheader(
9401 struct hfi1_devdata *dd, __le32 *rhf_addr)
9402{
9403 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9404
9405 return (struct hfi1_message_header *)
9406 (rhf_addr - dd->rhf_offset + offset);
9407}
9408
9409static const char * const ib_cfg_name_strings[] = {
9410 "HFI1_IB_CFG_LIDLMC",
9411 "HFI1_IB_CFG_LWID_DG_ENB",
9412 "HFI1_IB_CFG_LWID_ENB",
9413 "HFI1_IB_CFG_LWID",
9414 "HFI1_IB_CFG_SPD_ENB",
9415 "HFI1_IB_CFG_SPD",
9416 "HFI1_IB_CFG_RXPOL_ENB",
9417 "HFI1_IB_CFG_LREV_ENB",
9418 "HFI1_IB_CFG_LINKLATENCY",
9419 "HFI1_IB_CFG_HRTBT",
9420 "HFI1_IB_CFG_OP_VLS",
9421 "HFI1_IB_CFG_VL_HIGH_CAP",
9422 "HFI1_IB_CFG_VL_LOW_CAP",
9423 "HFI1_IB_CFG_OVERRUN_THRESH",
9424 "HFI1_IB_CFG_PHYERR_THRESH",
9425 "HFI1_IB_CFG_LINKDEFAULT",
9426 "HFI1_IB_CFG_PKEYS",
9427 "HFI1_IB_CFG_MTU",
9428 "HFI1_IB_CFG_LSTATE",
9429 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9430 "HFI1_IB_CFG_PMA_TICKS",
9431 "HFI1_IB_CFG_PORT"
9432};
9433
9434static const char *ib_cfg_name(int which)
9435{
9436 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9437 return "invalid";
9438 return ib_cfg_name_strings[which];
9439}
9440
9441int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9442{
9443 struct hfi1_devdata *dd = ppd->dd;
9444 int val = 0;
9445
9446 switch (which) {
9447 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9448 val = ppd->link_width_enabled;
9449 break;
9450 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9451 val = ppd->link_width_active;
9452 break;
9453 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9454 val = ppd->link_speed_enabled;
9455 break;
9456 case HFI1_IB_CFG_SPD: /* current Link speed */
9457 val = ppd->link_speed_active;
9458 break;
9459
9460 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9461 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9462 case HFI1_IB_CFG_LINKLATENCY:
9463 goto unimplemented;
9464
9465 case HFI1_IB_CFG_OP_VLS:
9466 val = ppd->vls_operational;
9467 break;
9468 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9469 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9470 break;
9471 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9472 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9473 break;
9474 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9475 val = ppd->overrun_threshold;
9476 break;
9477 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9478 val = ppd->phy_error_threshold;
9479 break;
9480 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9481 val = dd->link_default;
9482 break;
9483
9484 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9485 case HFI1_IB_CFG_PMA_TICKS:
9486 default:
9487unimplemented:
9488 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9489 dd_dev_info(
9490 dd,
9491 "%s: which %s: not implemented\n",
9492 __func__,
9493 ib_cfg_name(which));
9494 break;
9495 }
9496
9497 return val;
9498}
9499
9500/*
9501 * The largest MAD packet size.
9502 */
9503#define MAX_MAD_PACKET 2048
9504
9505/*
9506 * Return the maximum header bytes that can go on the _wire_
9507 * for this device. This count includes the ICRC which is
9508 * not part of the packet held in memory but it is appended
9509 * by the HW.
9510 * This is dependent on the device's receive header entry size.
9511 * HFI allows this to be set per-receive context, but the
9512 * driver presently enforces a global value.
9513 */
9514u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9515{
9516 /*
9517 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9518 * the Receive Header Entry Size minus the PBC (or RHF) size
9519 * plus one DW for the ICRC appended by HW.
9520 *
9521 * dd->rcd[0].rcvhdrqentsize is in DW.
 9522	 * We use rcd[0] as all contexts will have the same value. Also,
9523 * the first kernel context would have been allocated by now so
9524 * we are guaranteed a valid value.
9525 */
9526 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9527}
9528
9529/*
9530 * Set Send Length
9531 * @ppd - per port data
9532 *
9533 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9534 * registers compare against LRH.PktLen, so use the max bytes included
9535 * in the LRH.
9536 *
9537 * This routine changes all VL values except VL15, which it maintains at
9538 * the same value.
9539 */
9540static void set_send_length(struct hfi1_pportdata *ppd)
9541{
9542 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009543 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9544 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009545 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9546 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9547 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9548 int i;
9549
9550 for (i = 0; i < ppd->vls_supported; i++) {
9551 if (dd->vld[i].mtu > maxvlmtu)
9552 maxvlmtu = dd->vld[i].mtu;
9553 if (i <= 3)
9554 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9555 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9556 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9557 else
9558 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9559 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9560 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9561 }
9562 write_csr(dd, SEND_LEN_CHECK0, len1);
9563 write_csr(dd, SEND_LEN_CHECK1, len2);
9564 /* adjust kernel credit return thresholds based on new MTUs */
9565 /* all kernel receive contexts have the same hdrqentsize */
9566 for (i = 0; i < ppd->vls_supported; i++) {
9567 sc_set_cr_threshold(dd->vld[i].sc,
9568 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9569 dd->rcd[0]->rcvhdrqentsize));
9570 }
9571 sc_set_cr_threshold(dd->vld[15].sc,
9572 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9573 dd->rcd[0]->rcvhdrqentsize));
9574
9575 /* Adjust maximum MTU for the port in DC */
9576 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9577 (ilog2(maxvlmtu >> 8) + 1);
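	/* e.g. a maximum VL MTU of 4096 encodes as ilog2(4096 >> 8) + 1 = 5 */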
9578 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9579 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9580 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9581 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9582 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9583}
9584
9585static void set_lidlmc(struct hfi1_pportdata *ppd)
9586{
9587 int i;
9588 u64 sreg = 0;
9589 struct hfi1_devdata *dd = ppd->dd;
9590 u32 mask = ~((1U << ppd->lmc) - 1);
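	/* e.g. an LMC of 2 gives mask 0xfffffffc: the low two LID bits float */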
9591 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9592
9593 if (dd->hfi1_snoop.mode_flag)
9594 dd_dev_info(dd, "Set lid/lmc while snooping");
9595
9596 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9597 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9598 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9599 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9600 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9601 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9602 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9603
9604 /*
9605 * Iterate over all the send contexts and set their SLID check
9606 */
9607 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9608 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9609 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9610 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9611
9612 for (i = 0; i < dd->chip_send_contexts; i++) {
9613 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9614 i, (u32)sreg);
9615 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9616 }
9617
9618 /* Now we have to do the same thing for the sdma engines */
9619 sdma_update_lmc(dd, mask, ppd->lid);
9620}
9621
9622static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9623{
9624 unsigned long timeout;
9625 u32 curr_state;
9626
9627 timeout = jiffies + msecs_to_jiffies(msecs);
9628 while (1) {
9629 curr_state = read_physical_state(dd);
9630 if (curr_state == state)
9631 break;
9632 if (time_after(jiffies, timeout)) {
9633 dd_dev_err(dd,
9634 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9635 state, curr_state);
9636 return -ETIMEDOUT;
9637 }
9638 usleep_range(1950, 2050); /* sleep 2ms-ish */
9639 }
9640
9641 return 0;
9642}
9643
9644/*
9645 * Helper for set_link_state(). Do not call except from that routine.
9646 * Expects ppd->hls_mutex to be held.
9647 *
9648 * @rem_reason value to be sent to the neighbor
9649 *
9650 * LinkDownReasons only set if transition succeeds.
9651 */
9652static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9653{
9654 struct hfi1_devdata *dd = ppd->dd;
9655 u32 pstate, previous_state;
9656 u32 last_local_state;
9657 u32 last_remote_state;
9658 int ret;
9659 int do_transition;
9660 int do_wait;
9661
9662 previous_state = ppd->host_link_state;
9663 ppd->host_link_state = HLS_GOING_OFFLINE;
9664 pstate = read_physical_state(dd);
9665 if (pstate == PLS_OFFLINE) {
9666 do_transition = 0; /* in right state */
9667 do_wait = 0; /* ...no need to wait */
9668 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9669 do_transition = 0; /* in an offline transient state */
9670 do_wait = 1; /* ...wait for it to settle */
9671 } else {
9672 do_transition = 1; /* need to move to offline */
9673 do_wait = 1; /* ...will need to wait */
9674 }
9675
9676 if (do_transition) {
9677 ret = set_physical_link_state(dd,
9678 PLS_OFFLINE | (rem_reason << 8));
9679
9680 if (ret != HCMD_SUCCESS) {
9681 dd_dev_err(dd,
9682 "Failed to transition to Offline link state, return %d\n",
9683 ret);
9684 return -EINVAL;
9685 }
Bryan Morgana9c05e32016-02-03 14:30:49 -08009686 if (ppd->offline_disabled_reason ==
9687 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -04009688 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -08009689 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009690 }
9691
9692 if (do_wait) {
9693 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -04009694 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009695 if (ret < 0)
9696 return ret;
9697 }
9698
9699 /* make sure the logical state is also down */
9700 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9701
9702 /*
9703 * Now in charge of LCB - must be after the physical state is
9704 * offline.quiet and before host_link_state is changed.
9705 */
9706 set_host_lcb_access(dd);
9707 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9708 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9709
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009710 if (ppd->port_type == PORT_TYPE_QSFP &&
9711 ppd->qsfp_info.limiting_active &&
9712 qsfp_mod_present(ppd)) {
9713 set_qsfp_tx(ppd, 0);
9714 }
9715
Mike Marciniszyn77241052015-07-30 15:17:43 -04009716 /*
9717 * The LNI has a mandatory wait time after the physical state
9718 * moves to Offline.Quiet. The wait time may be different
9719 * depending on how the link went down. The 8051 firmware
9720 * will observe the needed wait time and only move to ready
9721 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -05009722 * is 6s, so wait that long and then at least 0.5s more for
9723 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009724 */
Dean Luick05087f3b2015-12-01 15:38:16 -05009725 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009726 if (ret) {
9727 dd_dev_err(dd,
9728 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9729 /* state is really offline, so make it so */
9730 ppd->host_link_state = HLS_DN_OFFLINE;
9731 return ret;
9732 }
9733
9734 /*
9735 * The state is now offline and the 8051 is ready to accept host
9736 * requests.
9737 * - change our state
9738 * - notify others if we were previously in a linkup state
9739 */
9740 ppd->host_link_state = HLS_DN_OFFLINE;
9741 if (previous_state & HLS_UP) {
9742 /* went down while link was up */
9743 handle_linkup_change(dd, 0);
9744 } else if (previous_state
9745 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9746 /* went down while attempting link up */
9747 /* byte 1 of last_*_state is the failure reason */
9748 read_last_local_state(dd, &last_local_state);
9749 read_last_remote_state(dd, &last_remote_state);
9750 dd_dev_err(dd,
9751 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9752 last_local_state, last_remote_state);
9753 }
9754
9755 /* the active link width (downgrade) is 0 on link down */
9756 ppd->link_width_active = 0;
9757 ppd->link_width_downgrade_tx_active = 0;
9758 ppd->link_width_downgrade_rx_active = 0;
9759 ppd->current_egress_rate = 0;
9760 return 0;
9761}
9762
9763/* return the link state name */
9764static const char *link_state_name(u32 state)
9765{
9766 const char *name;
9767 int n = ilog2(state);
9768 static const char * const names[] = {
9769 [__HLS_UP_INIT_BP] = "INIT",
9770 [__HLS_UP_ARMED_BP] = "ARMED",
9771 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9772 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9773 [__HLS_DN_POLL_BP] = "POLL",
9774 [__HLS_DN_DISABLE_BP] = "DISABLE",
9775 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9776 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9777 [__HLS_GOING_UP_BP] = "GOING_UP",
9778 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9779 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9780 };
9781
9782 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9783 return name ? name : "unknown";
9784}
9785
9786/* return the link state reason name */
9787static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9788{
9789 if (state == HLS_UP_INIT) {
9790 switch (ppd->linkinit_reason) {
9791 case OPA_LINKINIT_REASON_LINKUP:
9792 return "(LINKUP)";
9793 case OPA_LINKINIT_REASON_FLAPPING:
9794 return "(FLAPPING)";
9795 case OPA_LINKINIT_OUTSIDE_POLICY:
9796 return "(OUTSIDE_POLICY)";
9797 case OPA_LINKINIT_QUARANTINED:
9798 return "(QUARANTINED)";
9799 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9800 return "(INSUFIC_CAPABILITY)";
9801 default:
9802 break;
9803 }
9804 }
9805 return "";
9806}
9807
9808/*
9809 * driver_physical_state - convert the driver's notion of a port's
9810 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9811 * Return -1 (converted to a u32) to indicate error.
9812 */
9813u32 driver_physical_state(struct hfi1_pportdata *ppd)
9814{
9815 switch (ppd->host_link_state) {
9816 case HLS_UP_INIT:
9817 case HLS_UP_ARMED:
9818 case HLS_UP_ACTIVE:
9819 return IB_PORTPHYSSTATE_LINKUP;
9820 case HLS_DN_POLL:
9821 return IB_PORTPHYSSTATE_POLLING;
9822 case HLS_DN_DISABLE:
9823 return IB_PORTPHYSSTATE_DISABLED;
9824 case HLS_DN_OFFLINE:
9825 return OPA_PORTPHYSSTATE_OFFLINE;
9826 case HLS_VERIFY_CAP:
9827 return IB_PORTPHYSSTATE_POLLING;
9828 case HLS_GOING_UP:
9829 return IB_PORTPHYSSTATE_POLLING;
9830 case HLS_GOING_OFFLINE:
9831 return OPA_PORTPHYSSTATE_OFFLINE;
9832 case HLS_LINK_COOLDOWN:
9833 return OPA_PORTPHYSSTATE_OFFLINE;
9834 case HLS_DN_DOWNDEF:
9835 default:
9836 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9837 ppd->host_link_state);
9838 return -1;
9839 }
9840}
9841
9842/*
9843 * driver_logical_state - convert the driver's notion of a port's
9844 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9845 * (converted to a u32) to indicate error.
9846 */
9847u32 driver_logical_state(struct hfi1_pportdata *ppd)
9848{
9849 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9850 return IB_PORT_DOWN;
9851
9852 switch (ppd->host_link_state & HLS_UP) {
9853 case HLS_UP_INIT:
9854 return IB_PORT_INIT;
9855 case HLS_UP_ARMED:
9856 return IB_PORT_ARMED;
9857 case HLS_UP_ACTIVE:
9858 return IB_PORT_ACTIVE;
9859 default:
9860 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9861 ppd->host_link_state);
9862 return -1;
9863 }
9864}
9865
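/*
 * Record the reasons a link went down, for later reporting. Only the
 * first set of reasons is kept: the values are written only while both
 * the local and neighbor "latest" reasons are still zero.
 */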
9866void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9867 u8 neigh_reason, u8 rem_reason)
9868{
9869 if (ppd->local_link_down_reason.latest == 0 &&
9870 ppd->neigh_link_down_reason.latest == 0) {
9871 ppd->local_link_down_reason.latest = lcl_reason;
9872 ppd->neigh_link_down_reason.latest = neigh_reason;
9873 ppd->remote_link_down_reason = rem_reason;
9874 }
9875}
9876
9877/*
9878 * Change the physical and/or logical link state.
9879 *
9880 * Do not call this routine while inside an interrupt. It contains
9881 * calls to routines that can take multiple seconds to finish.
9882 *
9883 * Returns 0 on success, -errno on failure.
9884 */
9885int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9886{
9887 struct hfi1_devdata *dd = ppd->dd;
9888 struct ib_event event = {.device = NULL};
9889 int ret1, ret = 0;
9890 int was_up, is_down;
9891 int orig_new_state, poll_bounce;
9892
9893 mutex_lock(&ppd->hls_lock);
9894
9895 orig_new_state = state;
9896 if (state == HLS_DN_DOWNDEF)
9897 state = dd->link_default;
9898
9899 /* interpret poll -> poll as a link bounce */
9900 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9901 && state == HLS_DN_POLL;
9902
9903 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
9904 link_state_name(ppd->host_link_state),
9905 link_state_name(orig_new_state),
9906 poll_bounce ? "(bounce) " : "",
9907 link_state_reason_name(ppd, state));
9908
9909 was_up = !!(ppd->host_link_state & HLS_UP);
9910
9911 /*
9912 * If we're going to a (HLS_*) link state that implies the logical
9913 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
9914 * reset is_sm_config_started to 0.
9915 */
9916 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
9917 ppd->is_sm_config_started = 0;
9918
9919 /*
9920 * Do nothing if the states match. Let a poll to poll link bounce
9921 * go through.
9922 */
9923 if (ppd->host_link_state == state && !poll_bounce)
9924 goto done;
9925
9926 switch (state) {
9927 case HLS_UP_INIT:
9928 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
9929 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
9930 /*
9931 * Quick link up jumps from polling to here.
9932 *
9933 * Whether in normal or loopback mode, the
9934 * simulator jumps from polling to link up.
9935 * Accept that here.
9936 */
9937 /* OK */;
9938 } else if (ppd->host_link_state != HLS_GOING_UP) {
9939 goto unexpected;
9940 }
9941
9942 ppd->host_link_state = HLS_UP_INIT;
9943 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
9944 if (ret) {
9945 /* logical state didn't change, stay at going_up */
9946 ppd->host_link_state = HLS_GOING_UP;
9947 dd_dev_err(dd,
9948 "%s: logical state did not change to INIT\n",
9949 __func__);
9950 } else {
9951 /* clear old transient LINKINIT_REASON code */
9952 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
9953 ppd->linkinit_reason =
9954 OPA_LINKINIT_REASON_LINKUP;
9955
9956 /* enable the port */
9957 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9958
9959 handle_linkup_change(dd, 1);
9960 }
9961 break;
9962 case HLS_UP_ARMED:
9963 if (ppd->host_link_state != HLS_UP_INIT)
9964 goto unexpected;
9965
9966 ppd->host_link_state = HLS_UP_ARMED;
9967 set_logical_state(dd, LSTATE_ARMED);
9968 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
9969 if (ret) {
9970 /* logical state didn't change, stay at init */
9971 ppd->host_link_state = HLS_UP_INIT;
9972 dd_dev_err(dd,
9973 "%s: logical state did not change to ARMED\n",
9974 __func__);
9975 }
9976 /*
9977 * The simulator does not currently implement SMA messages,
9978 * so neighbor_normal is not set. Set it here when we first
9979 * move to Armed.
9980 */
9981 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9982 ppd->neighbor_normal = 1;
9983 break;
9984 case HLS_UP_ACTIVE:
9985 if (ppd->host_link_state != HLS_UP_ARMED)
9986 goto unexpected;
9987
9988 ppd->host_link_state = HLS_UP_ACTIVE;
9989 set_logical_state(dd, LSTATE_ACTIVE);
9990 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
9991 if (ret) {
9992 /* logical state didn't change, stay at armed */
9993 ppd->host_link_state = HLS_UP_ARMED;
9994 dd_dev_err(dd,
9995 "%s: logical state did not change to ACTIVE\n",
9996 __func__);
9997 } else {
9998
9999 /* tell all engines to go running */
10000 sdma_all_running(dd);
10001
10002			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010003 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010004 event.element.port_num = ppd->port;
10005 event.event = IB_EVENT_PORT_ACTIVE;
10006 }
10007 break;
10008 case HLS_DN_POLL:
10009 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10010 ppd->host_link_state == HLS_DN_OFFLINE) &&
10011 dd->dc_shutdown)
10012 dc_start(dd);
10013 /* Hand LED control to the DC */
10014 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10015
10016 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10017 u8 tmp = ppd->link_enabled;
10018
10019 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10020 if (ret) {
10021 ppd->link_enabled = tmp;
10022 break;
10023 }
10024 ppd->remote_link_down_reason = 0;
10025
10026 if (ppd->driver_link_ready)
10027 ppd->link_enabled = 1;
10028 }
10029
Jim Snowfb9036d2016-01-11 18:32:21 -050010030 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010031 ret = set_local_link_attributes(ppd);
10032 if (ret)
10033 break;
10034
10035 ppd->port_error_action = 0;
10036 ppd->host_link_state = HLS_DN_POLL;
10037
10038 if (quick_linkup) {
10039 /* quick linkup does not go into polling */
10040 ret = do_quick_linkup(dd);
10041 } else {
10042 ret1 = set_physical_link_state(dd, PLS_POLLING);
10043 if (ret1 != HCMD_SUCCESS) {
10044 dd_dev_err(dd,
10045 "Failed to transition to Polling link state, return 0x%x\n",
10046 ret1);
10047 ret = -EINVAL;
10048 }
10049 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010050 ppd->offline_disabled_reason =
10051 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010052 /*
10053 * If an error occurred above, go back to offline. The
10054 * caller may reschedule another attempt.
10055 */
10056 if (ret)
10057 goto_offline(ppd, 0);
10058 break;
10059 case HLS_DN_DISABLE:
10060 /* link is disabled */
10061 ppd->link_enabled = 0;
10062
10063 /* allow any state to transition to disabled */
10064
10065 /* must transition to offline first */
10066 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10067 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10068 if (ret)
10069 break;
10070 ppd->remote_link_down_reason = 0;
10071 }
10072
10073 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10074 if (ret1 != HCMD_SUCCESS) {
10075 dd_dev_err(dd,
10076 "Failed to transition to Disabled link state, return 0x%x\n",
10077 ret1);
10078 ret = -EINVAL;
10079 break;
10080 }
10081 ppd->host_link_state = HLS_DN_DISABLE;
10082 dc_shutdown(dd);
10083 break;
10084 case HLS_DN_OFFLINE:
10085 if (ppd->host_link_state == HLS_DN_DISABLE)
10086 dc_start(dd);
10087
10088 /* allow any state to transition to offline */
10089 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10090 if (!ret)
10091 ppd->remote_link_down_reason = 0;
10092 break;
10093 case HLS_VERIFY_CAP:
10094 if (ppd->host_link_state != HLS_DN_POLL)
10095 goto unexpected;
10096 ppd->host_link_state = HLS_VERIFY_CAP;
10097 break;
10098 case HLS_GOING_UP:
10099 if (ppd->host_link_state != HLS_VERIFY_CAP)
10100 goto unexpected;
10101
10102 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10103 if (ret1 != HCMD_SUCCESS) {
10104 dd_dev_err(dd,
10105 "Failed to transition to link up state, return 0x%x\n",
10106 ret1);
10107 ret = -EINVAL;
10108 break;
10109 }
10110 ppd->host_link_state = HLS_GOING_UP;
10111 break;
10112
10113 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10114 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10115 default:
10116 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10117 __func__, state);
10118 ret = -EINVAL;
10119 break;
10120 }
10121
10122 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10123 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10124
10125 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10126 ppd->neigh_link_down_reason.sma == 0) {
10127 ppd->local_link_down_reason.sma =
10128 ppd->local_link_down_reason.latest;
10129 ppd->neigh_link_down_reason.sma =
10130 ppd->neigh_link_down_reason.latest;
10131 }
10132
10133 goto done;
10134
10135unexpected:
10136 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10137 __func__, link_state_name(ppd->host_link_state),
10138 link_state_name(state));
10139 ret = -EINVAL;
10140
10141done:
10142 mutex_unlock(&ppd->hls_lock);
10143
10144 if (event.device)
10145 ib_dispatch_event(&event);
10146
10147 return ret;
10148}
10149
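/*
 * Apply a single IB/OPA port configuration item, selected by 'which'
 * (an HFI1_IB_CFG_* value), using 'val'. Returns 0 on success or a
 * negative errno on failure; unimplemented items are only logged when
 * the PRINT_UNIMPL capability is set.
 */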
10150int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10151{
10152 u64 reg;
10153 int ret = 0;
10154
10155 switch (which) {
10156 case HFI1_IB_CFG_LIDLMC:
10157 set_lidlmc(ppd);
10158 break;
10159 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10160 /*
10161 * The VL Arbitrator high limit is sent in units of 4k
10162 * bytes, while HFI stores it in units of 64 bytes.
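		 * For example, an FM value of 2 (2 * 4 KB = 8 KB) becomes
		 * 2 * 64 = 128 units of 64 bytes.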
10163 */
10164 val *= 4096/64;
10165 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10166 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10167 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10168 break;
10169 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10170 /* HFI only supports POLL as the default link down state */
10171 if (val != HLS_DN_POLL)
10172 ret = -EINVAL;
10173 break;
10174 case HFI1_IB_CFG_OP_VLS:
10175 if (ppd->vls_operational != val) {
10176 ppd->vls_operational = val;
10177 if (!ppd->port)
10178 ret = -EINVAL;
10179 else
10180 ret = sdma_map_init(
10181 ppd->dd,
10182 ppd->port - 1,
10183 val,
10184 NULL);
10185 }
10186 break;
10187 /*
10188 * For link width, link width downgrade, and speed enable, always AND
10189 * the setting with what is actually supported. This has two benefits.
10190 * First, enabled can't have unsupported values, no matter what the
10191 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10192 * "fill in with your supported value" have all the bits in the
10193 * field set, so simply ANDing with supported has the desired result.
10194 */
10195 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10196 ppd->link_width_enabled = val & ppd->link_width_supported;
10197 break;
10198 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10199 ppd->link_width_downgrade_enabled =
10200 val & ppd->link_width_downgrade_supported;
10201 break;
10202 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10203 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10204 break;
10205 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10206 /*
10207 * HFI does not follow IB specs, save this value
10208 * so we can report it, if asked.
10209 */
10210 ppd->overrun_threshold = val;
10211 break;
10212 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10213 /*
10214 * HFI does not follow IB specs, save this value
10215 * so we can report it, if asked.
10216 */
10217 ppd->phy_error_threshold = val;
10218 break;
10219
10220 case HFI1_IB_CFG_MTU:
10221 set_send_length(ppd);
10222 break;
10223
10224 case HFI1_IB_CFG_PKEYS:
10225 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10226 set_partition_keys(ppd);
10227 break;
10228
10229 default:
10230 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10231 dd_dev_info(ppd->dd,
10232 "%s: which %s, val 0x%x: not implemented\n",
10233 __func__, ib_cfg_name(which), val);
10234 break;
10235 }
10236 return ret;
10237}
10238
10239/* begin functions related to vl arbitration table caching */
10240static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10241{
10242 int i;
10243
10244 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10245 VL_ARB_LOW_PRIO_TABLE_SIZE);
10246 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10247 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10248
10249 /*
10250 * Note that we always return values directly from the
10251 * 'vl_arb_cache' (and do no CSR reads) in response to a
10252 * 'Get(VLArbTable)'. This is obviously correct after a
10253 * 'Set(VLArbTable)', since the cache will then be up to
10254 * date. But it's also correct prior to any 'Set(VLArbTable)'
10255 * since then both the cache, and the relevant h/w registers
10256 * will be zeroed.
10257 */
10258
10259 for (i = 0; i < MAX_PRIO_TABLE; i++)
10260 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10261}
10262
10263/*
10264 * vl_arb_lock_cache
10265 *
10266 * All other vl_arb_* functions should be called only after locking
10267 * the cache.
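 *
 * Typical usage, mirroring fm_get_table()/fm_set_table() below:
 *
 *	vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);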
10268 */
10269static inline struct vl_arb_cache *
10270vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10271{
10272 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10273 return NULL;
10274 spin_lock(&ppd->vl_arb_cache[idx].lock);
10275 return &ppd->vl_arb_cache[idx];
10276}
10277
10278static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10279{
10280 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10281}
10282
10283static void vl_arb_get_cache(struct vl_arb_cache *cache,
10284 struct ib_vl_weight_elem *vl)
10285{
10286 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10287}
10288
10289static void vl_arb_set_cache(struct vl_arb_cache *cache,
10290 struct ib_vl_weight_elem *vl)
10291{
10292 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10293}
10294
10295static int vl_arb_match_cache(struct vl_arb_cache *cache,
10296 struct ib_vl_weight_elem *vl)
10297{
10298 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10299}
10300/* end functions related to vl arbitration table caching */
10301
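/*
 * Write 'size' VL arbitration entries to the low or high priority list
 * CSRs starting at 'target'. If the link is up (and this is not A0
 * hardware), the data VLs are stopped and drained first so no packet is
 * stranded by a weight change, then reopened afterwards.
 */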
10302static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10303 u32 size, struct ib_vl_weight_elem *vl)
10304{
10305 struct hfi1_devdata *dd = ppd->dd;
10306 u64 reg;
10307 unsigned int i, is_up = 0;
10308 int drain, ret = 0;
10309
10310 mutex_lock(&ppd->hls_lock);
10311
10312 if (ppd->host_link_state & HLS_UP)
10313 is_up = 1;
10314
10315 drain = !is_ax(dd) && is_up;
10316
10317 if (drain)
10318 /*
10319 * Before adjusting VL arbitration weights, empty per-VL
10320 * FIFOs, otherwise a packet whose VL weight is being
10321 * set to 0 could get stuck in a FIFO with no chance to
10322 * egress.
10323 */
10324 ret = stop_drain_data_vls(dd);
10325
10326 if (ret) {
10327 dd_dev_err(
10328 dd,
10329 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10330 __func__);
10331 goto err;
10332 }
10333
10334 for (i = 0; i < size; i++, vl++) {
10335 /*
10336 * NOTE: The low priority shift and mask are used here, but
10337 * they are the same for both the low and high registers.
10338 */
10339 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10340 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10341 | (((u64)vl->weight
10342 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10343 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10344 write_csr(dd, target + (i * 8), reg);
10345 }
10346 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10347
10348 if (drain)
10349 open_fill_data_vls(dd); /* reopen all VLs */
10350
10351err:
10352 mutex_unlock(&ppd->hls_lock);
10353
10354 return ret;
10355}
10356
10357/*
10358 * Read one credit merge VL register.
10359 */
10360static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10361 struct vl_limit *vll)
10362{
10363 u64 reg = read_csr(dd, csr);
10364
10365 vll->dedicated = cpu_to_be16(
10366 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10367 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10368 vll->shared = cpu_to_be16(
10369 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10370 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10371}
10372
10373/*
10374 * Read the current credit merge limits.
10375 */
10376static int get_buffer_control(struct hfi1_devdata *dd,
10377 struct buffer_control *bc, u16 *overall_limit)
10378{
10379 u64 reg;
10380 int i;
10381
10382 /* not all entries are filled in */
10383 memset(bc, 0, sizeof(*bc));
10384
10385 /* OPA and HFI have a 1-1 mapping */
10386 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10387 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10388
10389 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10390 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10391
10392 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10393 bc->overall_shared_limit = cpu_to_be16(
10394 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10395 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10396 if (overall_limit)
10397 *overall_limit = (reg
10398 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10399 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10400 return sizeof(struct buffer_control);
10401}
10402
10403static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10404{
10405 u64 reg;
10406 int i;
10407
10408 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10409 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10410 for (i = 0; i < sizeof(u64); i++) {
10411 u8 byte = *(((u8 *)&reg) + i);
10412
10413 dp->vlnt[2 * i] = byte & 0xf;
10414 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10415 }
10416
10417 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10418 for (i = 0; i < sizeof(u64); i++) {
10419 u8 byte = *(((u8 *)&reg) + i);
10420
10421 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10422 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10423 }
10424 return sizeof(struct sc2vlnt);
10425}
10426
10427static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10428 struct ib_vl_weight_elem *vl)
10429{
10430 unsigned int i;
10431
10432 for (i = 0; i < nelems; i++, vl++) {
10433 vl->vl = 0xf;
10434 vl->weight = 0;
10435 }
10436}
10437
10438static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10439{
10440 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10441 DC_SC_VL_VAL(15_0,
10442 0, dp->vlnt[0] & 0xf,
10443 1, dp->vlnt[1] & 0xf,
10444 2, dp->vlnt[2] & 0xf,
10445 3, dp->vlnt[3] & 0xf,
10446 4, dp->vlnt[4] & 0xf,
10447 5, dp->vlnt[5] & 0xf,
10448 6, dp->vlnt[6] & 0xf,
10449 7, dp->vlnt[7] & 0xf,
10450 8, dp->vlnt[8] & 0xf,
10451 9, dp->vlnt[9] & 0xf,
10452 10, dp->vlnt[10] & 0xf,
10453 11, dp->vlnt[11] & 0xf,
10454 12, dp->vlnt[12] & 0xf,
10455 13, dp->vlnt[13] & 0xf,
10456 14, dp->vlnt[14] & 0xf,
10457 15, dp->vlnt[15] & 0xf));
10458 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10459 DC_SC_VL_VAL(31_16,
10460 16, dp->vlnt[16] & 0xf,
10461 17, dp->vlnt[17] & 0xf,
10462 18, dp->vlnt[18] & 0xf,
10463 19, dp->vlnt[19] & 0xf,
10464 20, dp->vlnt[20] & 0xf,
10465 21, dp->vlnt[21] & 0xf,
10466 22, dp->vlnt[22] & 0xf,
10467 23, dp->vlnt[23] & 0xf,
10468 24, dp->vlnt[24] & 0xf,
10469 25, dp->vlnt[25] & 0xf,
10470 26, dp->vlnt[26] & 0xf,
10471 27, dp->vlnt[27] & 0xf,
10472 28, dp->vlnt[28] & 0xf,
10473 29, dp->vlnt[29] & 0xf,
10474 30, dp->vlnt[30] & 0xf,
10475 31, dp->vlnt[31] & 0xf));
10476}
10477
10478static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10479 u16 limit)
10480{
10481 if (limit != 0)
10482 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10483 what, (int)limit, idx);
10484}
10485
10486/* change only the shared limit portion of SendCmGlobalCredit */
10487static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10488{
10489 u64 reg;
10490
10491 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10492 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10493 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10494 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10495}
10496
10497/* change only the total credit limit portion of SendCmGlobalCredit */
10498static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10499{
10500 u64 reg;
10501
10502 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10503 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10504 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10505 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10506}
10507
10508/* set the given per-VL shared limit */
10509static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10510{
10511 u64 reg;
10512 u32 addr;
10513
10514 if (vl < TXE_NUM_DATA_VL)
10515 addr = SEND_CM_CREDIT_VL + (8 * vl);
10516 else
10517 addr = SEND_CM_CREDIT_VL15;
10518
10519 reg = read_csr(dd, addr);
10520 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10521 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10522 write_csr(dd, addr, reg);
10523}
10524
10525/* set the given per-VL dedicated limit */
10526static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10527{
10528 u64 reg;
10529 u32 addr;
10530
10531 if (vl < TXE_NUM_DATA_VL)
10532 addr = SEND_CM_CREDIT_VL + (8 * vl);
10533 else
10534 addr = SEND_CM_CREDIT_VL15;
10535
10536 reg = read_csr(dd, addr);
10537 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10538 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10539 write_csr(dd, addr, reg);
10540}
10541
10542/* spin until the given per-VL status mask bits clear */
10543static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10544 const char *which)
10545{
10546 unsigned long timeout;
10547 u64 reg;
10548
10549 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10550 while (1) {
10551 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10552
10553 if (reg == 0)
10554 return; /* success */
10555 if (time_after(jiffies, timeout))
10556 break; /* timed out */
10557 udelay(1);
10558 }
10559
10560 dd_dev_err(dd,
10561 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10562 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10563 /*
10564 * If this occurs, it is likely there was a credit loss on the link.
10565 * The only recovery from that is a link bounce.
10566 */
10567 dd_dev_err(dd,
10568 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10569}
10570
10571/*
10572 * The number of credits on the VLs may be changed while everything
10573 * is "live", but the following algorithm must be followed due to
10574 * how the hardware is actually implemented. In particular,
10575 * Return_Credit_Status[] is the only correct status check.
10576 *
10577 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10578 * set Global_Shared_Credit_Limit = 0
10579 * use_all_vl = 1
10580 * mask0 = all VLs that are changing either dedicated or shared limits
10581 * set Shared_Limit[mask0] = 0
10582 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10583 * if (changing any dedicated limit)
10584 * mask1 = all VLs that are lowering dedicated limits
10585 * lower Dedicated_Limit[mask1]
10586 * spin until Return_Credit_Status[mask1] == 0
10587 * raise Dedicated_Limits
10588 * raise Shared_Limits
10589 * raise Global_Shared_Credit_Limit
10590 *
10591 * lower = if the new limit is lower, set the limit to the new value
10592 * raise = if the new limit is higher than the current value (may be changed
10593 * earlier in the algorithm), set the new limit to the new value
10594 */
10595static int set_buffer_control(struct hfi1_devdata *dd,
10596 struct buffer_control *new_bc)
10597{
10598 u64 changing_mask, ld_mask, stat_mask;
10599 int change_count;
10600 int i, use_all_mask;
10601 int this_shared_changing;
10602 /*
10603 * A0: add the variable any_shared_limit_changing below and in the
10604 * algorithm above. If removing A0 support, it can be removed.
10605 */
10606 int any_shared_limit_changing;
10607 struct buffer_control cur_bc;
10608 u8 changing[OPA_MAX_VLS];
10609 u8 lowering_dedicated[OPA_MAX_VLS];
10610 u16 cur_total;
10611 u32 new_total = 0;
10612 const u64 all_mask =
10613 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10614 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10615 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10616 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10617 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10618 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10619 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10620 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10621 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10622
10623#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10624#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10625
10626
10627 /* find the new total credits, do sanity check on unused VLs */
10628 for (i = 0; i < OPA_MAX_VLS; i++) {
10629 if (valid_vl(i)) {
10630 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10631 continue;
10632 }
10633 nonzero_msg(dd, i, "dedicated",
10634 be16_to_cpu(new_bc->vl[i].dedicated));
10635 nonzero_msg(dd, i, "shared",
10636 be16_to_cpu(new_bc->vl[i].shared));
10637 new_bc->vl[i].dedicated = 0;
10638 new_bc->vl[i].shared = 0;
10639 }
10640 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050010641
Mike Marciniszyn77241052015-07-30 15:17:43 -040010642 /* fetch the current values */
10643 get_buffer_control(dd, &cur_bc, &cur_total);
10644
10645 /*
10646 * Create the masks we will use.
10647 */
10648 memset(changing, 0, sizeof(changing));
10649 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10650 /* NOTE: Assumes that the individual VL bits are adjacent and in
10651	 * increasing order */
10652 stat_mask =
10653 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10654 changing_mask = 0;
10655 ld_mask = 0;
10656 change_count = 0;
10657 any_shared_limit_changing = 0;
10658 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10659 if (!valid_vl(i))
10660 continue;
10661 this_shared_changing = new_bc->vl[i].shared
10662 != cur_bc.vl[i].shared;
10663 if (this_shared_changing)
10664 any_shared_limit_changing = 1;
10665 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10666 || this_shared_changing) {
10667 changing[i] = 1;
10668 changing_mask |= stat_mask;
10669 change_count++;
10670 }
10671 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10672 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10673 lowering_dedicated[i] = 1;
10674 ld_mask |= stat_mask;
10675 }
10676 }
10677
10678 /* bracket the credit change with a total adjustment */
10679 if (new_total > cur_total)
10680 set_global_limit(dd, new_total);
10681
10682 /*
10683 * Start the credit change algorithm.
10684 */
10685 use_all_mask = 0;
10686 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010687 be16_to_cpu(cur_bc.overall_shared_limit)) ||
10688 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010689 set_global_shared(dd, 0);
10690 cur_bc.overall_shared_limit = 0;
10691 use_all_mask = 1;
10692 }
10693
10694 for (i = 0; i < NUM_USABLE_VLS; i++) {
10695 if (!valid_vl(i))
10696 continue;
10697
10698 if (changing[i]) {
10699 set_vl_shared(dd, i, 0);
10700 cur_bc.vl[i].shared = 0;
10701 }
10702 }
10703
10704 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10705 "shared");
10706
10707 if (change_count > 0) {
10708 for (i = 0; i < NUM_USABLE_VLS; i++) {
10709 if (!valid_vl(i))
10710 continue;
10711
10712 if (lowering_dedicated[i]) {
10713 set_vl_dedicated(dd, i,
10714 be16_to_cpu(new_bc->vl[i].dedicated));
10715 cur_bc.vl[i].dedicated =
10716 new_bc->vl[i].dedicated;
10717 }
10718 }
10719
10720 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10721
10722 /* now raise all dedicated that are going up */
10723 for (i = 0; i < NUM_USABLE_VLS; i++) {
10724 if (!valid_vl(i))
10725 continue;
10726
10727 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10728 be16_to_cpu(cur_bc.vl[i].dedicated))
10729 set_vl_dedicated(dd, i,
10730 be16_to_cpu(new_bc->vl[i].dedicated));
10731 }
10732 }
10733
10734 /* next raise all shared that are going up */
10735 for (i = 0; i < NUM_USABLE_VLS; i++) {
10736 if (!valid_vl(i))
10737 continue;
10738
10739 if (be16_to_cpu(new_bc->vl[i].shared) >
10740 be16_to_cpu(cur_bc.vl[i].shared))
10741 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10742 }
10743
10744 /* finally raise the global shared */
10745 if (be16_to_cpu(new_bc->overall_shared_limit) >
10746 be16_to_cpu(cur_bc.overall_shared_limit))
10747 set_global_shared(dd,
10748 be16_to_cpu(new_bc->overall_shared_limit));
10749
10750 /* bracket the credit change with a total adjustment */
10751 if (new_total < cur_total)
10752 set_global_limit(dd, new_total);
10753 return 0;
10754}
10755
10756/*
10757 * Read the given fabric manager table. Return the size of the
10758 * table (in bytes) on success, and a negative error code on
10759 * failure.
10760 */
10761int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10762
10763{
10764 int size;
10765 struct vl_arb_cache *vlc;
10766
10767 switch (which) {
10768 case FM_TBL_VL_HIGH_ARB:
10769 size = 256;
10770 /*
10771 * OPA specifies 128 elements (of 2 bytes each), though
10772 * HFI supports only 16 elements in h/w.
10773 */
10774 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10775 vl_arb_get_cache(vlc, t);
10776 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10777 break;
10778 case FM_TBL_VL_LOW_ARB:
10779 size = 256;
10780 /*
10781 * OPA specifies 128 elements (of 2 bytes each), though
10782 * HFI supports only 16 elements in h/w.
10783 */
10784 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10785 vl_arb_get_cache(vlc, t);
10786 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10787 break;
10788 case FM_TBL_BUFFER_CONTROL:
10789 size = get_buffer_control(ppd->dd, t, NULL);
10790 break;
10791 case FM_TBL_SC2VLNT:
10792 size = get_sc2vlnt(ppd->dd, t);
10793 break;
10794 case FM_TBL_VL_PREEMPT_ELEMS:
10795 size = 256;
10796 /* OPA specifies 128 elements, of 2 bytes each */
10797 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10798 break;
10799 case FM_TBL_VL_PREEMPT_MATRIX:
10800 size = 256;
10801 /*
10802 * OPA specifies that this is the same size as the VL
10803 * arbitration tables (i.e., 256 bytes).
10804 */
10805 break;
10806 default:
10807 return -EINVAL;
10808 }
10809 return size;
10810}
10811
10812/*
10813 * Write the given fabric manager table.
10814 */
10815int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10816{
10817 int ret = 0;
10818 struct vl_arb_cache *vlc;
10819
10820 switch (which) {
10821 case FM_TBL_VL_HIGH_ARB:
10822 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10823 if (vl_arb_match_cache(vlc, t)) {
10824 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10825 break;
10826 }
10827 vl_arb_set_cache(vlc, t);
10828 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10829 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10830 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10831 break;
10832 case FM_TBL_VL_LOW_ARB:
10833 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10834 if (vl_arb_match_cache(vlc, t)) {
10835 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10836 break;
10837 }
10838 vl_arb_set_cache(vlc, t);
10839 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10840 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10841 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10842 break;
10843 case FM_TBL_BUFFER_CONTROL:
10844 ret = set_buffer_control(ppd->dd, t);
10845 break;
10846 case FM_TBL_SC2VLNT:
10847 set_sc2vlnt(ppd->dd, t);
10848 break;
10849 default:
10850 ret = -EINVAL;
10851 }
10852 return ret;
10853}
10854
10855/*
10856 * Disable all data VLs.
10857 *
10858 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10859 */
10860static int disable_data_vls(struct hfi1_devdata *dd)
10861{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010862 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010863 return 1;
10864
10865 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10866
10867 return 0;
10868}
10869
10870/*
10871 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10872 * Just re-enables all data VLs (the "fill" part happens
10873 * automatically - the name was chosen for symmetry with
10874 * stop_drain_data_vls()).
10875 *
10876 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10877 */
10878int open_fill_data_vls(struct hfi1_devdata *dd)
10879{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050010880 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010881 return 1;
10882
10883 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10884
10885 return 0;
10886}
10887
10888/*
10889 * drain_data_vls() - assumes that disable_data_vls() has been called,
10890 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
10891 * engines to drop to 0.
10892 */
10893static void drain_data_vls(struct hfi1_devdata *dd)
10894{
10895 sc_wait(dd);
10896 sdma_wait(dd);
10897 pause_for_credit_return(dd);
10898}
10899
10900/*
10901 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
10902 *
10903 * Use open_fill_data_vls() to resume using data VLs. This pair is
10904 * meant to be used like this:
10905 *
10906 * stop_drain_data_vls(dd);
10907 * // do things with per-VL resources
10908 * open_fill_data_vls(dd);
10909 */
10910int stop_drain_data_vls(struct hfi1_devdata *dd)
10911{
10912 int ret;
10913
10914 ret = disable_data_vls(dd);
10915 if (ret == 0)
10916 drain_data_vls(dd);
10917
10918 return ret;
10919}
10920
10921/*
10922 * Convert a nanosecond time to a cclock count. No matter how slow
10923 * the cclock, a non-zero ns will always have a non-zero result.
10924 */
10925u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
10926{
10927 u32 cclocks;
10928
10929 if (dd->icode == ICODE_FPGA_EMULATION)
10930 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
10931 else /* simulation pretends to be ASIC */
10932 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
10933 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
10934 cclocks = 1;
10935 return cclocks;
10936}
10937
10938/*
10939 * Convert a cclock count to nanoseconds. No matter how slow
10940 * the cclock, a non-zero cclock count will always have a non-zero result.
10941 */
10942u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
10943{
10944 u32 ns;
10945
10946 if (dd->icode == ICODE_FPGA_EMULATION)
10947 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
10948 else /* simulation pretends to be ASIC */
10949 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
10950 if (cclocks && !ns)
10951 ns = 1;
10952 return ns;
10953}
10954
10955/*
10956 * Dynamically adjust the receive interrupt timeout for a context based on
10957 * incoming packet rate.
10958 *
10959 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
10960 */
10961static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
10962{
10963 struct hfi1_devdata *dd = rcd->dd;
10964 u32 timeout = rcd->rcvavail_timeout;
10965
10966 /*
10967 * This algorithm doubles or halves the timeout depending on whether
10968	 * the number of packets received in this interrupt was less than or
10969	 * greater than or equal to the interrupt count.
10970	 *
10971	 * The calculations below do not allow a steady state to be achieved.
10972	 * Only at the endpoints is it possible to have an unchanging
10973 * timeout.
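	 *
	 * For example, a current timeout of 8 becomes 4 when fewer than
	 * rcv_intr_count packets arrived in this interrupt, or 16 (capped
	 * at dd->rcv_intr_timeout_csr) when at least that many arrived.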
10974 */
10975 if (npkts < rcv_intr_count) {
10976 /*
10977 * Not enough packets arrived before the timeout, adjust
10978 * timeout downward.
10979 */
10980 if (timeout < 2) /* already at minimum? */
10981 return;
10982 timeout >>= 1;
10983 } else {
10984 /*
10985 * More than enough packets arrived before the timeout, adjust
10986 * timeout upward.
10987 */
10988 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
10989 return;
10990 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
10991 }
10992
10993 rcd->rcvavail_timeout = timeout;
10994 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
10995	 * been verified to be in range */
10996 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
10997 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
10998}
10999
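/*
 * Advance the receive header queue head (hd) and, when updegr is set,
 * the eager buffer index head (egrhd) for a context. When intr_adjust
 * is set, the receive interrupt timeout is re-tuned from npkts first,
 * since the timeout CSR must be written before RcvHdrHead for the new
 * value to take effect.
 */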
11000void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11001 u32 intr_adjust, u32 npkts)
11002{
11003 struct hfi1_devdata *dd = rcd->dd;
11004 u64 reg;
11005 u32 ctxt = rcd->ctxt;
11006
11007 /*
11008 * Need to write timeout register before updating RcvHdrHead to ensure
11009 * that a new value is used when the HW decides to restart counting.
11010 */
11011 if (intr_adjust)
11012 adjust_rcv_timeout(rcd, npkts);
11013 if (updegr) {
11014 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11015 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11016 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11017 }
11018 mmiowb();
11019 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11020 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11021 << RCV_HDR_HEAD_HEAD_SHIFT);
11022 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11023 mmiowb();
11024}
11025
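/*
 * Return non-zero if the receive header queue for this context is
 * empty, i.e. the tail (taken from the DMA'd copy when available,
 * otherwise the CSR) has caught up with the head in RcvHdrHead.
 */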
11026u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11027{
11028 u32 head, tail;
11029
11030 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11031 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11032
11033 if (rcd->rcvhdrtail_kvaddr)
11034 tail = get_rcvhdrtail(rcd);
11035 else
11036 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11037
11038 return head == tail;
11039}
11040
11041/*
11042 * Context Control and Receive Array encoding for buffer size:
11043 * 0x0 invalid
11044 * 0x1 4 KB
11045 * 0x2 8 KB
11046 * 0x3 16 KB
11047 * 0x4 32 KB
11048 * 0x5 64 KB
11049 * 0x6 128 KB
11050 * 0x7 256 KB
11051 * 0x8 512 KB (Receive Array only)
11052 * 0x9 1 MB (Receive Array only)
11053 * 0xa 2 MB (Receive Array only)
11054 *
11055 * 0xB-0xF - reserved (Receive Array only)
11056 *
11057 *
11058 * This routine assumes that the value has already been sanity checked.
11059 */
11060static u32 encoded_size(u32 size)
11061{
11062 switch (size) {
11063 case 4*1024: return 0x1;
11064 case 8*1024: return 0x2;
11065 case 16*1024: return 0x3;
11066 case 32*1024: return 0x4;
11067 case 64*1024: return 0x5;
11068 case 128*1024: return 0x6;
11069 case 256*1024: return 0x7;
11070 case 512*1024: return 0x8;
11071 case 1*1024*1024: return 0x9;
11072 case 2*1024*1024: return 0xa;
11073 }
11074 return 0x1; /* if invalid, go with the minimum size */
11075}
11076
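/*
 * Apply the HFI1_RCVCTRL_* operations in 'op' to receive context
 * 'ctxt': enable or disable the context, and switch tail updates,
 * interrupt-available, TID flow, one-packet-per-eager-buffer, and the
 * no-drop modes on or off, then write the resulting RcvCtxtCtrl value
 * (with a workaround for the sticky BlockedRHQFull status on enable).
 */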
11077void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11078{
11079 struct hfi1_ctxtdata *rcd;
11080 u64 rcvctrl, reg;
11081 int did_enable = 0;
11082
11083 rcd = dd->rcd[ctxt];
11084 if (!rcd)
11085 return;
11086
11087 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11088
11089 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11090 /* if the context already enabled, don't do the extra steps */
11091 if ((op & HFI1_RCVCTRL_CTXT_ENB)
11092 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11093 /* reset the tail and hdr addresses, and sequence count */
11094 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11095 rcd->rcvhdrq_phys);
11096 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11097 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11098 rcd->rcvhdrqtailaddr_phys);
11099 rcd->seq_cnt = 1;
11100
11101 /* reset the cached receive header queue head value */
11102 rcd->head = 0;
11103
11104 /*
11105 * Zero the receive header queue so we don't get false
11106 * positives when checking the sequence number. The
11107 * sequence numbers could land exactly on the same spot.
11108		 * E.g. a receive context restart before the receive header queue wrapped.
11109 */
11110 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11111
11112 /* starting timeout */
11113 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11114
11115 /* enable the context */
11116 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11117
11118 /* clean the egr buffer size first */
11119 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11120 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11121 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11122 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11123
11124 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11125 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11126 did_enable = 1;
11127
11128 /* zero RcvEgrIndexHead */
11129 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11130
11131 /* set eager count and base index */
11132 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11133 & RCV_EGR_CTRL_EGR_CNT_MASK)
11134 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11135 (((rcd->eager_base >> RCV_SHIFT)
11136 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11137 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11138 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11139
11140 /*
11141 * Set TID (expected) count and base index.
11142 * rcd->expected_count is set to individual RcvArray entries,
11143 * not pairs, and the CSR takes a pair-count in groups of
11144 * four, so divide by 8.
11145 */
11146 reg = (((rcd->expected_count >> RCV_SHIFT)
11147 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11148 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11149 (((rcd->expected_base >> RCV_SHIFT)
11150 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11151 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11152 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011153 if (ctxt == HFI1_CTRL_CTXT)
11154 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011155 }
11156 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11157 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011158 /*
11159 * When receive context is being disabled turn on tail
11160 * update with a dummy tail address and then disable
11161 * receive context.
11162 */
11163 if (dd->rcvhdrtail_dummy_physaddr) {
11164 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11165 dd->rcvhdrtail_dummy_physaddr);
11166 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11167 }
11168
Mike Marciniszyn77241052015-07-30 15:17:43 -040011169 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11170 }
11171 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11172 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11173 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11174 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11175 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11176 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11177 if (op & HFI1_RCVCTRL_TAILUPD_DIS)
11178 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11179 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11180 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11181 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11182 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11183 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11184 /* In one-packet-per-eager mode, the size comes from
11185		 * the RcvArray entry. */
11186 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11187 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11188 }
11189 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11190 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11191 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11192 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11193 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11194 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11195 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11196 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11197 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11198 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11199 rcd->rcvctrl = rcvctrl;
11200 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11201 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11202
11203 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11204 if (did_enable
11205 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11206 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11207 if (reg != 0) {
11208 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11209 ctxt, reg);
11210 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11211 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11212 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11213 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11214 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11215 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11216 ctxt, reg, reg == 0 ? "not" : "still");
11217 }
11218 }
11219
11220 if (did_enable) {
11221 /*
11222 * The interrupt timeout and count must be set after
11223 * the context is enabled to take effect.
11224 */
11225 /* set interrupt timeout */
11226 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11227 (u64)rcd->rcvavail_timeout <<
11228 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11229
11230 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11231 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11232 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11233 }
11234
11235 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11236 /*
11237 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011238 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11239 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011240 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011241 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11242 dd->rcvhdrtail_dummy_physaddr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011243}
11244
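/*
 * Return either the device counter names (when namep is non-NULL) or a
 * freshly read block of device counter values (via cntrp). The return
 * value is the size, in bytes, of whichever block was requested;
 * indexed reads (pos != 0) are not supported.
 */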
11245u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11246 u64 **cntrp)
11247{
11248 int ret;
11249 u64 val = 0;
11250
11251 if (namep) {
11252 ret = dd->cntrnameslen;
11253 if (pos != 0) {
11254 dd_dev_err(dd, "read_cntrs does not support indexing");
11255 return 0;
11256 }
11257 *namep = dd->cntrnames;
11258 } else {
11259 const struct cntr_entry *entry;
11260 int i, j;
11261
11262 ret = (dd->ndevcntrs) * sizeof(u64);
11263 if (pos != 0) {
11264 dd_dev_err(dd, "read_cntrs does not support indexing");
11265 return 0;
11266 }
11267
11268 /* Get the start of the block of counters */
11269 *cntrp = dd->cntrs;
11270
11271 /*
11272 * Now go and fill in each counter in the block.
11273 */
11274 for (i = 0; i < DEV_CNTR_LAST; i++) {
11275 entry = &dev_cntrs[i];
11276 hfi1_cdbg(CNTR, "reading %s", entry->name);
11277 if (entry->flags & CNTR_DISABLED) {
11278 /* Nothing */
11279 hfi1_cdbg(CNTR, "\tDisabled\n");
11280 } else {
11281 if (entry->flags & CNTR_VL) {
11282 hfi1_cdbg(CNTR, "\tPer VL\n");
11283 for (j = 0; j < C_VL_COUNT; j++) {
11284 val = entry->rw_cntr(entry,
11285 dd, j,
11286 CNTR_MODE_R,
11287 0);
11288 hfi1_cdbg(
11289 CNTR,
11290 "\t\tRead 0x%llx for %d\n",
11291 val, j);
11292 dd->cntrs[entry->offset + j] =
11293 val;
11294 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011295 } else if (entry->flags & CNTR_SDMA) {
11296 hfi1_cdbg(CNTR,
11297 "\t Per SDMA Engine\n");
11298 for (j = 0; j < dd->chip_sdma_engines;
11299 j++) {
11300 val =
11301 entry->rw_cntr(entry, dd, j,
11302 CNTR_MODE_R, 0);
11303 hfi1_cdbg(CNTR,
11304 "\t\tRead 0x%llx for %d\n",
11305 val, j);
11306 dd->cntrs[entry->offset + j] =
11307 val;
11308 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011309 } else {
11310 val = entry->rw_cntr(entry, dd,
11311 CNTR_INVALID_VL,
11312 CNTR_MODE_R, 0);
11313 dd->cntrs[entry->offset] = val;
11314 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11315 }
11316 }
11317 }
11318 }
11319 return ret;
11320}
11321
11322/*
11323 * Used by sysfs to create files for hfi stats to read
11324 */
11325u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11326 char **namep, u64 **cntrp)
11327{
11328 int ret;
11329 u64 val = 0;
11330
11331 if (namep) {
11332 ret = dd->portcntrnameslen;
11333 if (pos != 0) {
11334 dd_dev_err(dd, "index not supported");
11335 return 0;
11336 }
11337 *namep = dd->portcntrnames;
11338 } else {
11339 const struct cntr_entry *entry;
11340 struct hfi1_pportdata *ppd;
11341 int i, j;
11342
11343 ret = (dd->nportcntrs) * sizeof(u64);
11344 if (pos != 0) {
11345 dd_dev_err(dd, "indexing not supported");
11346 return 0;
11347 }
11348 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11349 *cntrp = ppd->cntrs;
11350
11351 for (i = 0; i < PORT_CNTR_LAST; i++) {
11352 entry = &port_cntrs[i];
11353 hfi1_cdbg(CNTR, "reading %s", entry->name);
11354 if (entry->flags & CNTR_DISABLED) {
11355 /* Nothing */
11356 hfi1_cdbg(CNTR, "\tDisabled\n");
11357 continue;
11358 }
11359
11360 if (entry->flags & CNTR_VL) {
11361 hfi1_cdbg(CNTR, "\tPer VL");
11362 for (j = 0; j < C_VL_COUNT; j++) {
11363 val = entry->rw_cntr(entry, ppd, j,
11364 CNTR_MODE_R,
11365 0);
11366 hfi1_cdbg(
11367 CNTR,
11368 "\t\tRead 0x%llx for %d",
11369 val, j);
11370 ppd->cntrs[entry->offset + j] = val;
11371 }
11372 } else {
11373 val = entry->rw_cntr(entry, ppd,
11374 CNTR_INVALID_VL,
11375 CNTR_MODE_R,
11376 0);
11377 ppd->cntrs[entry->offset] = val;
11378 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11379 }
11380 }
11381 }
11382 return ret;
11383}
11384
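/*
 * Undo counter setup: stop the synthetic-counter timer if it was
 * started, and free the per-port and per-device counter buffers, shadow
 * buffers, per-CPU verbs statistics, and counter name strings.
 */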
11385static void free_cntrs(struct hfi1_devdata *dd)
11386{
11387 struct hfi1_pportdata *ppd;
11388 int i;
11389
11390 if (dd->synth_stats_timer.data)
11391 del_timer_sync(&dd->synth_stats_timer);
11392 dd->synth_stats_timer.data = 0;
11393 ppd = (struct hfi1_pportdata *)(dd + 1);
11394 for (i = 0; i < dd->num_pports; i++, ppd++) {
11395 kfree(ppd->cntrs);
11396 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011397 free_percpu(ppd->ibport_data.rvp.rc_acks);
11398 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11399 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011400 ppd->cntrs = NULL;
11401 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011402 ppd->ibport_data.rvp.rc_acks = NULL;
11403 ppd->ibport_data.rvp.rc_qacks = NULL;
11404 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011405 }
11406 kfree(dd->portcntrnames);
11407 dd->portcntrnames = NULL;
11408 kfree(dd->cntrs);
11409 dd->cntrs = NULL;
11410 kfree(dd->scntrs);
11411 dd->scntrs = NULL;
11412 kfree(dd->cntrnames);
11413 dd->cntrnames = NULL;
11414}
11415
11416#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11417#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11418
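/*
 * Read a device or port counter and maintain its 64-bit software value.
 * For synthetic counters, the previously saved value (*psval) is used to
 * detect hardware wrap: 32-bit counters are widened by carrying a
 * software upper half, and any counter that wraps at full width (or has
 * already reached CNTR_MAX) is treated as saturated.
 */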
11419static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11420 u64 *psval, void *context, int vl)
11421{
11422 u64 val;
11423 u64 sval = *psval;
11424
11425 if (entry->flags & CNTR_DISABLED) {
11426 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11427 return 0;
11428 }
11429
11430 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11431
11432 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11433
11434	/* If it's a synthetic counter, there is more work we need to do */
11435 if (entry->flags & CNTR_SYNTH) {
11436 if (sval == CNTR_MAX) {
11437 /* No need to read already saturated */
11438 return CNTR_MAX;
11439 }
11440
11441 if (entry->flags & CNTR_32BIT) {
11442 /* 32bit counters can wrap multiple times */
11443 u64 upper = sval >> 32;
11444 u64 lower = (sval << 32) >> 32;
11445
11446 if (lower > val) { /* hw wrapped */
11447 if (upper == CNTR_32BIT_MAX)
11448 val = CNTR_MAX;
11449 else
11450 upper++;
11451 }
11452
11453 if (val != CNTR_MAX)
11454 val = (upper << 32) | val;
11455
11456 } else {
11457 /* If we rolled we are saturated */
11458 if ((val < sval) || (val > CNTR_MAX))
11459 val = CNTR_MAX;
11460 }
11461 }
11462
11463 *psval = val;
11464
11465 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11466
11467 return val;
11468}
11469
11470static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11471 struct cntr_entry *entry,
11472 u64 *psval, void *context, int vl, u64 data)
11473{
11474 u64 val;
11475
11476 if (entry->flags & CNTR_DISABLED) {
11477 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11478 return 0;
11479 }
11480
11481 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11482
11483 if (entry->flags & CNTR_SYNTH) {
11484 *psval = data;
11485 if (entry->flags & CNTR_32BIT) {
11486 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11487 (data << 32) >> 32);
11488 val = data; /* return the full 64bit value */
11489 } else {
11490 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11491 data);
11492 }
11493 } else {
11494 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11495 }
11496
11497 *psval = val;
11498
11499 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11500
11501 return val;
11502}
11503
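/*
 * The accessors below read or write one device (dd) or port (ppd)
 * counter by index. When 'vl' is not CNTR_INVALID_VL, the per-VL slot
 * is selected, and the shadow arrays (dd->scntrs / ppd->scntrs) supply
 * the saved values used for synthetic counter tracking. The port
 * accessors return 0 for the per-context RcvHdrOvfl counters of
 * contexts that are not in use.
 */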
11504u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11505{
11506 struct cntr_entry *entry;
11507 u64 *sval;
11508
11509 entry = &dev_cntrs[index];
11510 sval = dd->scntrs + entry->offset;
11511
11512 if (vl != CNTR_INVALID_VL)
11513 sval += vl;
11514
11515 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11516}
11517
11518u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11519{
11520 struct cntr_entry *entry;
11521 u64 *sval;
11522
11523 entry = &dev_cntrs[index];
11524 sval = dd->scntrs + entry->offset;
11525
11526 if (vl != CNTR_INVALID_VL)
11527 sval += vl;
11528
11529 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11530}
11531
11532u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11533{
11534 struct cntr_entry *entry;
11535 u64 *sval;
11536
11537 entry = &port_cntrs[index];
11538 sval = ppd->scntrs + entry->offset;
11539
11540 if (vl != CNTR_INVALID_VL)
11541 sval += vl;
11542
11543 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11544 (index <= C_RCV_HDR_OVF_LAST)) {
11545 /* We do not want to bother for disabled contexts */
11546 return 0;
11547 }
11548
11549 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11550}
11551
11552u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11553{
11554 struct cntr_entry *entry;
11555 u64 *sval;
11556
11557 entry = &port_cntrs[index];
11558 sval = ppd->scntrs + entry->offset;
11559
11560 if (vl != CNTR_INVALID_VL)
11561 sval += vl;
11562
11563 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11564 (index <= C_RCV_HDR_OVF_LAST)) {
11565 /* We do not want to bother for disabled contexts */
11566 return 0;
11567 }
11568
11569 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11570}
11571
11572static void update_synth_timer(unsigned long opaque)
11573{
11574 u64 cur_tx;
11575 u64 cur_rx;
11576 u64 total_flits;
11577 u8 update = 0;
11578 int i, j, vl;
11579 struct hfi1_pportdata *ppd;
11580 struct cntr_entry *entry;
11581
11582 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11583
11584 /*
11585	 * Rather than keep beating on the CSRs, pick a minimal set that we can
11586	 * check to watch for potential rollover. We do this by looking at
11587	 * the number of flits sent/received. If the total flits exceed the
11588	 * 32-bit limit, we have to iterate over all the counters and update.
11589 */
11590 entry = &dev_cntrs[C_DC_RCV_FLITS];
11591 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11592
11593 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11594 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11595
11596 hfi1_cdbg(
11597 CNTR,
11598 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11599 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11600
11601 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11602 /*
11603 * May not be strictly necessary to update but it won't hurt and
11604 * simplifies the logic here.
11605 */
11606 update = 1;
11607 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11608 dd->unit);
11609 } else {
11610 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11611 hfi1_cdbg(CNTR,
11612 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11613 total_flits, (u64)CNTR_32BIT_MAX);
11614 if (total_flits >= CNTR_32BIT_MAX) {
11615 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11616 dd->unit);
11617 update = 1;
11618 }
11619 }
11620
11621 if (update) {
11622 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11623 for (i = 0; i < DEV_CNTR_LAST; i++) {
11624 entry = &dev_cntrs[i];
11625 if (entry->flags & CNTR_VL) {
11626 for (vl = 0; vl < C_VL_COUNT; vl++)
11627 read_dev_cntr(dd, i, vl);
11628 } else {
11629 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11630 }
11631 }
11632 ppd = (struct hfi1_pportdata *)(dd + 1);
11633 for (i = 0; i < dd->num_pports; i++, ppd++) {
11634 for (j = 0; j < PORT_CNTR_LAST; j++) {
11635 entry = &port_cntrs[j];
11636 if (entry->flags & CNTR_VL) {
11637 for (vl = 0; vl < C_VL_COUNT; vl++)
11638 read_port_cntr(ppd, j, vl);
11639 } else {
11640 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11641 }
11642 }
11643 }
11644
11645 /*
11646 * We want the value in the register. The goal is to keep track
11647 * of the number of "ticks" not the counter value. In other
11648 * words if the register rolls we want to notice it and go ahead
11649 * and force an update.
11650 */
11651 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11652 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11653 CNTR_MODE_R, 0);
11654
11655 entry = &dev_cntrs[C_DC_RCV_FLITS];
11656 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11657 CNTR_MODE_R, 0);
11658
11659 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11660 dd->unit, dd->last_tx, dd->last_rx);
11661
11662 } else {
11663 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11664 }
11665
11666	mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11667}
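/*
 * Illustrative decision sketch for the timer above, with assumed readings
 * (the real values come from the DC flit counters):
 *
 *	last_tx = last_rx = 0
 *	cur_tx = 0x90000000, cur_rx = 0x80000000
 *	total_flits = 0x110000000 > CNTR_32BIT_MAX
 *
 * so every device and port counter is re-read, folding any 32-bit wraps
 * into the 64-bit software shadows before they can be missed.
 */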
11668
11669#define C_MAX_NAME 13 /* 12 chars + one for \0 */
11670static int init_cntrs(struct hfi1_devdata *dd)
11671{
Dean Luickc024c552016-01-11 18:30:57 -050011672 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011673 size_t sz;
11674 char *p;
11675 char name[C_MAX_NAME];
11676 struct hfi1_pportdata *ppd;
11677
11678 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011679 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11680 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011681
11682 /***********************/
11683 /* per device counters */
11684 /***********************/
11685
11686	/* size names and determine how many we have */
11687 dd->ndevcntrs = 0;
11688 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011689
11690 for (i = 0; i < DEV_CNTR_LAST; i++) {
11691 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11692 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11693 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11694 continue;
11695 }
11696
11697 if (dev_cntrs[i].flags & CNTR_VL) {
11698 hfi1_dbg_early("\tProcessing VL cntr\n");
Dean Luickc024c552016-01-11 18:30:57 -050011699 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011700 for (j = 0; j < C_VL_COUNT; j++) {
11701 memset(name, '\0', C_MAX_NAME);
11702 snprintf(name, C_MAX_NAME, "%s%d",
11703 dev_cntrs[i].name,
11704 vl_from_idx(j));
11705 sz += strlen(name);
11706 sz++;
11707 hfi1_dbg_early("\t\t%s\n", name);
11708 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011709 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011710 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11711 hfi1_dbg_early(
11712 "\tProcessing per SDE counters chip enginers %u\n",
11713 dd->chip_sdma_engines);
Dean Luickc024c552016-01-11 18:30:57 -050011714 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011715 for (j = 0; j < dd->chip_sdma_engines; j++) {
11716 memset(name, '\0', C_MAX_NAME);
11717 snprintf(name, C_MAX_NAME, "%s%d",
11718 dev_cntrs[i].name, j);
11719 sz += strlen(name);
11720 sz++;
11721 hfi1_dbg_early("\t\t%s\n", name);
11722 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011723 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011724 } else {
11725 /* +1 for newline */
11726 sz += strlen(dev_cntrs[i].name) + 1;
Dean Luickc024c552016-01-11 18:30:57 -050011727 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011728 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011729 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11730 }
11731 }
11732
11733 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011734 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011735 if (!dd->cntrs)
11736 goto bail;
11737
Dean Luickc024c552016-01-11 18:30:57 -050011738 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011739 if (!dd->scntrs)
11740 goto bail;
11741
11742
11743 /* allocate space for the counter names */
11744 dd->cntrnameslen = sz;
11745 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11746 if (!dd->cntrnames)
11747 goto bail;
11748
11749 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011750 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011751 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11752 /* Nothing */
11753 } else {
11754 if (dev_cntrs[i].flags & CNTR_VL) {
11755 for (j = 0; j < C_VL_COUNT; j++) {
11756 memset(name, '\0', C_MAX_NAME);
11757 snprintf(name, C_MAX_NAME, "%s%d",
11758 dev_cntrs[i].name,
11759 vl_from_idx(j));
11760 memcpy(p, name, strlen(name));
11761 p += strlen(name);
11762 *p++ = '\n';
11763 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011764 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11765 for (j = 0; j < TXE_NUM_SDMA_ENGINES;
11766 j++) {
11767 memset(name, '\0', C_MAX_NAME);
11768 snprintf(name, C_MAX_NAME, "%s%d",
11769 dev_cntrs[i].name, j);
11770 memcpy(p, name, strlen(name));
11771 p += strlen(name);
11772 *p++ = '\n';
11773 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011774 } else {
11775 memcpy(p, dev_cntrs[i].name,
11776 strlen(dev_cntrs[i].name));
11777 p += strlen(dev_cntrs[i].name);
11778 *p++ = '\n';
11779 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011780 }
11781 }
11782
11783 /*********************/
11784 /* per port counters */
11785 /*********************/
11786
11787 /*
11788 * Go through the counters for the overflows and disable the ones we
11789 * don't need. This varies based on platform so we need to do it
11790 * dynamically here.
11791 */
11792 rcv_ctxts = dd->num_rcv_contexts;
11793 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11794 i <= C_RCV_HDR_OVF_LAST; i++) {
11795 port_cntrs[i].flags |= CNTR_DISABLED;
11796 }
11797
11798	/* size port counter names and determine how many we have */
11799 sz = 0;
11800 dd->nportcntrs = 0;
11801 for (i = 0; i < PORT_CNTR_LAST; i++) {
11802 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11803 if (port_cntrs[i].flags & CNTR_DISABLED) {
11804 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11805 continue;
11806 }
11807
11808 if (port_cntrs[i].flags & CNTR_VL) {
11809 hfi1_dbg_early("\tProcessing VL cntr\n");
11810 port_cntrs[i].offset = dd->nportcntrs;
11811 for (j = 0; j < C_VL_COUNT; j++) {
11812 memset(name, '\0', C_MAX_NAME);
11813 snprintf(name, C_MAX_NAME, "%s%d",
11814 port_cntrs[i].name,
11815 vl_from_idx(j));
11816 sz += strlen(name);
11817 sz++;
11818 hfi1_dbg_early("\t\t%s\n", name);
11819 dd->nportcntrs++;
11820 }
11821 } else {
11822 /* +1 for newline */
11823 sz += strlen(port_cntrs[i].name) + 1;
11824 port_cntrs[i].offset = dd->nportcntrs;
11825 dd->nportcntrs++;
11826 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11827 }
11828 }
11829
11830 /* allocate space for the counter names */
11831 dd->portcntrnameslen = sz;
11832 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11833 if (!dd->portcntrnames)
11834 goto bail;
11835
11836 /* fill in port cntr names */
11837 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11838 if (port_cntrs[i].flags & CNTR_DISABLED)
11839 continue;
11840
11841 if (port_cntrs[i].flags & CNTR_VL) {
11842 for (j = 0; j < C_VL_COUNT; j++) {
11843 memset(name, '\0', C_MAX_NAME);
11844 snprintf(name, C_MAX_NAME, "%s%d",
11845 port_cntrs[i].name,
11846 vl_from_idx(j));
11847 memcpy(p, name, strlen(name));
11848 p += strlen(name);
11849 *p++ = '\n';
11850 }
11851 } else {
11852 memcpy(p, port_cntrs[i].name,
11853 strlen(port_cntrs[i].name));
11854 p += strlen(port_cntrs[i].name);
11855 *p++ = '\n';
11856 }
11857 }
11858
11859 /* allocate per port storage for counter values */
11860 ppd = (struct hfi1_pportdata *)(dd + 1);
11861 for (i = 0; i < dd->num_pports; i++, ppd++) {
11862 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11863 if (!ppd->cntrs)
11864 goto bail;
11865
11866 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
11867 if (!ppd->scntrs)
11868 goto bail;
11869 }
11870
11871 /* CPU counters need to be allocated and zeroed */
11872 if (init_cpu_counters(dd))
11873 goto bail;
11874
11875 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11876 return 0;
11877bail:
11878 free_cntrs(dd);
11879 return -ENOMEM;
11880}
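/*
 * The counter-name buffer built above is a flat, newline-separated string;
 * per-VL and per-SDMA-engine counters are expanded with a numeric suffix.
 * A sketch with made-up counter names (the real names live in dev_cntrs[]):
 *
 *	"RxFlits\n"
 *	"TxFlits0\n" ... "TxFlits7\n"       per-VL expansion, "%s%d"
 *	"SDmaDesc0\n" ... "SDmaDesc15\n"    per-SDMA-engine expansion
 *
 * dd->cntrnameslen is the exact byte count of that string, which is why the
 * sizing pass adds strlen(name) + 1 for every expanded entry.
 */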
11881
11882
11883static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
11884{
11885 switch (chip_lstate) {
11886 default:
11887 dd_dev_err(dd,
11888 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
11889 chip_lstate);
11890 /* fall through */
11891 case LSTATE_DOWN:
11892 return IB_PORT_DOWN;
11893 case LSTATE_INIT:
11894 return IB_PORT_INIT;
11895 case LSTATE_ARMED:
11896 return IB_PORT_ARMED;
11897 case LSTATE_ACTIVE:
11898 return IB_PORT_ACTIVE;
11899 }
11900}
11901
11902u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
11903{
11904 /* look at the HFI meta-states only */
11905 switch (chip_pstate & 0xf0) {
11906 default:
11907 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
11908 chip_pstate);
11909 /* fall through */
11910 case PLS_DISABLED:
11911 return IB_PORTPHYSSTATE_DISABLED;
11912 case PLS_OFFLINE:
11913 return OPA_PORTPHYSSTATE_OFFLINE;
11914 case PLS_POLLING:
11915 return IB_PORTPHYSSTATE_POLLING;
11916 case PLS_CONFIGPHY:
11917 return IB_PORTPHYSSTATE_TRAINING;
11918 case PLS_LINKUP:
11919 return IB_PORTPHYSSTATE_LINKUP;
11920 case PLS_PHYTEST:
11921 return IB_PORTPHYSSTATE_PHY_TEST;
11922 }
11923}
11924
11925/* return the OPA port logical state name */
11926const char *opa_lstate_name(u32 lstate)
11927{
11928 static const char * const port_logical_names[] = {
11929 "PORT_NOP",
11930 "PORT_DOWN",
11931 "PORT_INIT",
11932 "PORT_ARMED",
11933 "PORT_ACTIVE",
11934 "PORT_ACTIVE_DEFER",
11935 };
11936 if (lstate < ARRAY_SIZE(port_logical_names))
11937 return port_logical_names[lstate];
11938 return "unknown";
11939}
11940
11941/* return the OPA port physical state name */
11942const char *opa_pstate_name(u32 pstate)
11943{
11944 static const char * const port_physical_names[] = {
11945 "PHYS_NOP",
11946 "reserved1",
11947 "PHYS_POLL",
11948 "PHYS_DISABLED",
11949 "PHYS_TRAINING",
11950 "PHYS_LINKUP",
11951 "PHYS_LINK_ERR_RECOVER",
11952 "PHYS_PHY_TEST",
11953 "reserved8",
11954 "PHYS_OFFLINE",
11955 "PHYS_GANGED",
11956 "PHYS_TEST",
11957 };
11958 if (pstate < ARRAY_SIZE(port_physical_names))
11959 return port_physical_names[pstate];
11960 return "unknown";
11961}
11962
11963/*
11964 * Read the hardware link state and set the driver's cached value of it.
11965 * Return the (new) current value.
11966 */
11967u32 get_logical_state(struct hfi1_pportdata *ppd)
11968{
11969 u32 new_state;
11970
11971 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
11972 if (new_state != ppd->lstate) {
11973 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
11974 opa_lstate_name(new_state), new_state);
11975 ppd->lstate = new_state;
11976 }
11977 /*
11978 * Set port status flags in the page mapped into userspace
11979 * memory. Do it here to ensure a reliable state - this is
11980 * the only function called by all state handling code.
11981 * Always set the flags due to the fact that the cache value
11982 * might have been changed explicitly outside of this
11983 * function.
11984 */
11985 if (ppd->statusp) {
11986 switch (ppd->lstate) {
11987 case IB_PORT_DOWN:
11988 case IB_PORT_INIT:
11989 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
11990 HFI1_STATUS_IB_READY);
11991 break;
11992 case IB_PORT_ARMED:
11993 *ppd->statusp |= HFI1_STATUS_IB_CONF;
11994 break;
11995 case IB_PORT_ACTIVE:
11996 *ppd->statusp |= HFI1_STATUS_IB_READY;
11997 break;
11998 }
11999 }
12000 return ppd->lstate;
12001}
12002
12003/**
12004 * wait_logical_linkstate - wait for an IB link state change to occur
12005 * @ppd: port device
12006 * @state: the state to wait for
12007 * @msecs: the number of milliseconds to wait
12008 *
12009 * Wait up to msecs milliseconds for IB link state change to occur.
12010 * For now, take the easy polling route.
12011 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12012 */
12013static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12014 int msecs)
12015{
12016 unsigned long timeout;
12017
12018 timeout = jiffies + msecs_to_jiffies(msecs);
12019 while (1) {
12020 if (get_logical_state(ppd) == state)
12021 return 0;
12022 if (time_after(jiffies, timeout))
12023 break;
12024 msleep(20);
12025 }
12026 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12027
12028 return -ETIMEDOUT;
12029}
12030
12031u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12032{
12033 static u32 remembered_state = 0xff;
12034 u32 pstate;
12035 u32 ib_pstate;
12036
12037 pstate = read_physical_state(ppd->dd);
12038 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12039 if (remembered_state != ib_pstate) {
12040 dd_dev_info(ppd->dd,
12041 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12042 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12043 pstate);
12044 remembered_state = ib_pstate;
12045 }
12046 return ib_pstate;
12047}
12048
12049/*
12050 * Read/modify/write ASIC_QSFP register bits as selected by mask
12051 * data: 0 or 1 in the positions depending on what needs to be written
12052 * dir: 0 for read, 1 for write
12053 * mask: select by setting
12054 * I2CCLK (bit 0)
12055 * I2CDATA (bit 1)
12056 */
12057u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12058 u32 mask)
12059{
12060 u64 qsfp_oe, target_oe;
12061
12062 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12063 if (mask) {
12064 /* We are writing register bits, so lock access */
12065 dir &= mask;
12066 data &= mask;
12067
12068 qsfp_oe = read_csr(dd, target_oe);
12069 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12070 write_csr(dd, target_oe, qsfp_oe);
12071 }
12072	/* We are exclusively reading bits here, but it is unlikely
12073	 * we'll get valid data when we set the direction of the pin
12074	 * in the same call, so the caller should invoke this function
12075	 * again to get valid data
12076 */
12077 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12078}
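/*
 * Usage sketch (not a call made in this file). Per the header comment,
 * mask bit 0 selects I2CCLK and bit 1 selects I2CDATA; with mask == 0 the
 * OE register is left untouched and the call is a pure read of the pins:
 *
 *	hfi1_gpio_mod(dd, target, 0, 1, 1);         request I2CCLK as output
 *	pins = hfi1_gpio_mod(dd, target, 0, 0, 0);  sample ASIC_QSFP*_IN only
 */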
12079
12080#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12081(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12082
12083#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12084(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12085
12086int hfi1_init_ctxt(struct send_context *sc)
12087{
12088 if (sc != NULL) {
12089 struct hfi1_devdata *dd = sc->dd;
12090 u64 reg;
12091 u8 set = (sc->type == SC_USER ?
12092 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12093 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12094 reg = read_kctxt_csr(dd, sc->hw_context,
12095 SEND_CTXT_CHECK_ENABLE);
12096 if (set)
12097 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12098 else
12099 SET_STATIC_RATE_CONTROL_SMASK(reg);
12100 write_kctxt_csr(dd, sc->hw_context,
12101 SEND_CTXT_CHECK_ENABLE, reg);
12102 }
12103 return 0;
12104}
12105
12106int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12107{
12108 int ret = 0;
12109 u64 reg;
12110
12111 if (dd->icode != ICODE_RTL_SILICON) {
12112 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12113 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12114 __func__);
12115 return -EINVAL;
12116 }
12117 reg = read_csr(dd, ASIC_STS_THERM);
12118 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12119 ASIC_STS_THERM_CURR_TEMP_MASK);
12120 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12121 ASIC_STS_THERM_LO_TEMP_MASK);
12122 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12123 ASIC_STS_THERM_HI_TEMP_MASK);
12124 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12125 ASIC_STS_THERM_CRIT_TEMP_MASK);
12126 /* triggers is a 3-bit value - 1 bit per trigger. */
12127 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12128
12129 return ret;
12130}
12131
12132/* ========================================================================= */
12133
12134/*
12135 * Enable/disable chip from delivering interrupts.
12136 */
12137void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12138{
12139 int i;
12140
12141 /*
12142 * In HFI, the mask needs to be 1 to allow interrupts.
12143 */
12144 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012145 /* enable all interrupts */
12146 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12147 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12148
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012149 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012150 } else {
12151 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12152 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12153 }
12154}
12155
12156/*
12157 * Clear all interrupt sources on the chip.
12158 */
12159static void clear_all_interrupts(struct hfi1_devdata *dd)
12160{
12161 int i;
12162
12163 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12164 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12165
12166 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12167 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12168 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12169 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12170 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12171 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12172 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12173 for (i = 0; i < dd->chip_send_contexts; i++)
12174 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12175 for (i = 0; i < dd->chip_sdma_engines; i++)
12176 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12177
12178 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12179 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12180 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12181}
12182
12183/* Move to pcie.c? */
12184static void disable_intx(struct pci_dev *pdev)
12185{
12186 pci_intx(pdev, 0);
12187}
12188
12189static void clean_up_interrupts(struct hfi1_devdata *dd)
12190{
12191 int i;
12192
12193 /* remove irqs - must happen before disabling/turning off */
12194 if (dd->num_msix_entries) {
12195 /* MSI-X */
12196 struct hfi1_msix_entry *me = dd->msix_entries;
12197
12198 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12199 if (me->arg == NULL) /* => no irq, no affinity */
12200 break;
12201 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12202 NULL);
12203 free_irq(me->msix.vector, me->arg);
12204 }
12205 } else {
12206 /* INTx */
12207 if (dd->requested_intx_irq) {
12208 free_irq(dd->pcidev->irq, dd);
12209 dd->requested_intx_irq = 0;
12210 }
12211 }
12212
12213 /* turn off interrupts */
12214 if (dd->num_msix_entries) {
12215 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012216 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012217 } else {
12218 /* INTx */
12219 disable_intx(dd->pcidev);
12220 }
12221
12222 /* clean structures */
12223 for (i = 0; i < dd->num_msix_entries; i++)
12224 free_cpumask_var(dd->msix_entries[i].mask);
12225 kfree(dd->msix_entries);
12226 dd->msix_entries = NULL;
12227 dd->num_msix_entries = 0;
12228}
12229
12230/*
12231 * Remap the interrupt source from the general handler to the given MSI-X
12232 * interrupt.
12233 */
12234static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12235{
12236 u64 reg;
12237 int m, n;
12238
12239 /* clear from the handled mask of the general interrupt */
12240 m = isrc / 64;
12241 n = isrc % 64;
12242 dd->gi_mask[m] &= ~((u64)1 << n);
12243
12244 /* direct the chip source to the given MSI-X interrupt */
12245 m = isrc / 8;
12246 n = isrc % 8;
12247 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12248 reg &= ~((u64)0xff << (8*n));
12249 reg |= ((u64)msix_intr & 0xff) << (8*n);
12250 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12251}
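/*
 * Worked example of the index math above, using an assumed source number
 * (isrc = 137) purely for illustration:
 *
 *	gi_mask:  m = 137 / 64 = 2, n = 137 % 64 = 9
 *	          -> clear bit 9 of dd->gi_mask[2]
 *	int map:  m = 137 / 8 = 17, n = 137 % 8 = 1
 *	          -> byte 1 of the CCE_INT_MAP CSR at index 17 becomes the
 *	             MSI-X vector number
 */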
12252
12253static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12254 int engine, int msix_intr)
12255{
12256 /*
12257 * SDMA engine interrupt sources grouped by type, rather than
12258 * engine. Per-engine interrupts are as follows:
12259 * SDMA
12260 * SDMAProgress
12261 * SDMAIdle
12262 */
12263 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12264 msix_intr);
12265 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12266 msix_intr);
12267 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12268 msix_intr);
12269}
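/*
 * Because the SDMA sources are grouped by type, one engine owns three
 * non-adjacent sources. Assuming TXE_NUM_SDMA_ENGINES is 16 (illustrative
 * only), engine 3 remaps:
 *
 *	IS_SDMA_START + 0 * 16 + 3	SDMA
 *	IS_SDMA_START + 1 * 16 + 3	SDMAProgress
 *	IS_SDMA_START + 2 * 16 + 3	SDMAIdle
 *
 * all to the same MSI-X vector.
 */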
12270
Mike Marciniszyn77241052015-07-30 15:17:43 -040012271static int request_intx_irq(struct hfi1_devdata *dd)
12272{
12273 int ret;
12274
Jubin John98050712015-11-16 21:59:27 -050012275 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12276 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012277 ret = request_irq(dd->pcidev->irq, general_interrupt,
12278 IRQF_SHARED, dd->intx_name, dd);
12279 if (ret)
12280 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12281 ret);
12282 else
12283 dd->requested_intx_irq = 1;
12284 return ret;
12285}
12286
12287static int request_msix_irqs(struct hfi1_devdata *dd)
12288{
12289 const struct cpumask *local_mask;
12290 cpumask_var_t def, rcv;
12291 bool def_ret, rcv_ret;
12292 int first_general, last_general;
12293 int first_sdma, last_sdma;
12294 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012295 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012296 int rcv_cpu, sdma_cpu;
12297 int i, ret = 0, possible;
12298 int ht;
12299
12300 /* calculate the ranges we are going to use */
12301 first_general = 0;
12302 first_sdma = last_general = first_general + 1;
12303 first_rx = last_sdma = first_sdma + dd->num_sdma;
12304 last_rx = first_rx + dd->n_krcv_queues;
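	/*
	 * Example layout of the ranges computed above, with assumed counts
	 * num_sdma = 16 and n_krcv_queues = 8 (illustrative only):
	 *
	 *	MSI-X 0		general ("slow path") interrupt
	 *	MSI-X 1..16	one per SDMA engine
	 *	MSI-X 17..24	one per kernel receive context
	 */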
12305
12306 /*
12307 * Interrupt affinity.
12308 *
12309	 * non-rcv avail interrupts get a default mask that starts as the
12310	 * local node's CPUs with hyperthread siblings removed and with
12311	 * each CPU claimed by a receive context removed.
12312	 *
12313	 * rcv avail interrupts get node-relative CPU 1, wrapping back
12314	 * to node-relative CPU 1 as necessary.
12315 *
12316 */
12317 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12318 /* if first cpu is invalid, use NUMA 0 */
12319 if (cpumask_first(local_mask) >= nr_cpu_ids)
12320 local_mask = topology_core_cpumask(0);
12321
12322 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12323 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12324 if (!def_ret || !rcv_ret)
12325 goto bail;
12326 /* use local mask as default */
12327 cpumask_copy(def, local_mask);
12328 possible = cpumask_weight(def);
12329 /* disarm threads from default */
12330 ht = cpumask_weight(
12331 topology_sibling_cpumask(cpumask_first(local_mask)));
12332 for (i = possible/ht; i < possible; i++)
12333 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012334 /* def now has full cores on chosen node*/
12335 first_cpu = cpumask_first(def);
12336 if (nr_cpu_ids >= first_cpu)
12337 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012338 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012339
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012340 /* One context is reserved as control context */
12341 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012342 cpumask_clear_cpu(curr_cpu, def);
12343 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012344 curr_cpu = cpumask_next(curr_cpu, def);
12345 if (curr_cpu >= nr_cpu_ids)
12346 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012347 }
12348 /* def mask has non-rcv, rcv has recv mask */
12349 rcv_cpu = cpumask_first(rcv);
12350 sdma_cpu = cpumask_first(def);
12351
12352 /*
12353 * Sanity check - the code expects all SDMA chip source
12354 * interrupts to be in the same CSR, starting at bit 0. Verify
12355 * that this is true by checking the bit location of the start.
12356 */
12357 BUILD_BUG_ON(IS_SDMA_START % 64);
12358
12359 for (i = 0; i < dd->num_msix_entries; i++) {
12360 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12361 const char *err_info;
12362 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012363 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012364 void *arg;
12365 int idx;
12366 struct hfi1_ctxtdata *rcd = NULL;
12367 struct sdma_engine *sde = NULL;
12368
12369 /* obtain the arguments to request_irq */
12370 if (first_general <= i && i < last_general) {
12371 idx = i - first_general;
12372 handler = general_interrupt;
12373 arg = dd;
12374 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012375 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012376 err_info = "general";
12377 } else if (first_sdma <= i && i < last_sdma) {
12378 idx = i - first_sdma;
12379 sde = &dd->per_sdma[idx];
12380 handler = sdma_interrupt;
12381 arg = sde;
12382 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012383 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012384 err_info = "sdma";
12385 remap_sdma_interrupts(dd, idx, i);
12386 } else if (first_rx <= i && i < last_rx) {
12387 idx = i - first_rx;
12388 rcd = dd->rcd[idx];
12389 /* no interrupt if no rcd */
12390 if (!rcd)
12391 continue;
12392 /*
12393 * Set the interrupt register and mask for this
12394 * context's interrupt.
12395 */
12396 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12397 rcd->imask = ((u64)1) <<
12398 ((IS_RCVAVAIL_START+idx) % 64);
12399 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012400 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012401 arg = rcd;
12402 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012403 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012404 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012405 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012406 } else {
12407 /* not in our expected range - complain, then
12408 ignore it */
12409 dd_dev_err(dd,
12410 "Unexpected extra MSI-X interrupt %d\n", i);
12411 continue;
12412 }
12413 /* no argument, no interrupt */
12414 if (arg == NULL)
12415 continue;
12416 /* make sure the name is terminated */
12417 me->name[sizeof(me->name)-1] = 0;
12418
Dean Luickf4f30031c2015-10-26 10:28:44 -040012419 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12420 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012421 if (ret) {
12422 dd_dev_err(dd,
12423 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12424 err_info, me->msix.vector, idx, ret);
12425 return ret;
12426 }
12427 /*
12428 * assign arg after request_irq call, so it will be
12429 * cleaned up
12430 */
12431 me->arg = arg;
12432
12433 if (!zalloc_cpumask_var(
12434 &dd->msix_entries[i].mask,
12435 GFP_KERNEL))
12436 goto bail;
12437 if (handler == sdma_interrupt) {
12438 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12439 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012440 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012441 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12442 sdma_cpu = cpumask_next(sdma_cpu, def);
12443 if (sdma_cpu >= nr_cpu_ids)
12444 sdma_cpu = cpumask_first(def);
12445 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012446 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12447 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12448 cpumask_first(def) : rcv_cpu);
12449 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12450 /* map to first default */
12451 cpumask_set_cpu(cpumask_first(def),
12452 dd->msix_entries[i].mask);
12453 } else {
12454 cpumask_set_cpu(rcv_cpu,
12455 dd->msix_entries[i].mask);
12456 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12457 if (rcv_cpu >= nr_cpu_ids)
12458 rcv_cpu = cpumask_first(rcv);
12459 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012460 } else {
12461 /* otherwise first def */
12462 dd_dev_info(dd, "%s cpu %d\n",
12463 err_info, cpumask_first(def));
12464 cpumask_set_cpu(
12465 cpumask_first(def), dd->msix_entries[i].mask);
12466 }
12467 irq_set_affinity_hint(
12468 dd->msix_entries[i].msix.vector,
12469 dd->msix_entries[i].mask);
12470 }
12471
12472out:
12473 free_cpumask_var(def);
12474 free_cpumask_var(rcv);
12475 return ret;
12476bail:
12477 ret = -ENOMEM;
12478 goto out;
12479}
12480
12481/*
12482 * Set the general handler to accept all interrupts, remap all
12483 * chip interrupts back to MSI-X 0.
12484 */
12485static void reset_interrupts(struct hfi1_devdata *dd)
12486{
12487 int i;
12488
12489 /* all interrupts handled by the general handler */
12490 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12491 dd->gi_mask[i] = ~(u64)0;
12492
12493 /* all chip interrupts map to MSI-X 0 */
12494 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12495 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12496}
12497
12498static int set_up_interrupts(struct hfi1_devdata *dd)
12499{
12500 struct hfi1_msix_entry *entries;
12501 u32 total, request;
12502 int i, ret;
12503 int single_interrupt = 0; /* we expect to have all the interrupts */
12504
12505 /*
12506 * Interrupt count:
12507 * 1 general, "slow path" interrupt (includes the SDMA engines
12508 * slow source, SDMACleanupDone)
12509 * N interrupts - one per used SDMA engine
12510 * M interrupt - one per kernel receive context
12511 */
12512 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12513
12514 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12515 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012516 ret = -ENOMEM;
12517 goto fail;
12518 }
12519 /* 1-1 MSI-X entry assignment */
12520 for (i = 0; i < total; i++)
12521 entries[i].msix.entry = i;
12522
12523 /* ask for MSI-X interrupts */
12524 request = total;
12525 request_msix(dd, &request, entries);
12526
12527 if (request == 0) {
12528 /* using INTx */
12529 /* dd->num_msix_entries already zero */
12530 kfree(entries);
12531 single_interrupt = 1;
12532 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12533 } else {
12534 /* using MSI-X */
12535 dd->num_msix_entries = request;
12536 dd->msix_entries = entries;
12537
12538 if (request != total) {
12539 /* using MSI-X, with reduced interrupts */
12540 dd_dev_err(
12541 dd,
12542 "cannot handle reduced interrupt case, want %u, got %u\n",
12543 total, request);
12544 ret = -EINVAL;
12545 goto fail;
12546 }
12547 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12548 }
12549
12550 /* mask all interrupts */
12551 set_intr_state(dd, 0);
12552 /* clear all pending interrupts */
12553 clear_all_interrupts(dd);
12554
12555 /* reset general handler mask, chip MSI-X mappings */
12556 reset_interrupts(dd);
12557
12558 if (single_interrupt)
12559 ret = request_intx_irq(dd);
12560 else
12561 ret = request_msix_irqs(dd);
12562 if (ret)
12563 goto fail;
12564
12565 return 0;
12566
12567fail:
12568 clean_up_interrupts(dd);
12569 return ret;
12570}
12571
12572/*
12573 * Set up context values in dd. Sets:
12574 *
12575 * num_rcv_contexts - number of contexts being used
12576 * n_krcv_queues - number of kernel contexts
12577 * first_user_ctxt - first non-kernel context in array of contexts
12578 * freectxts - number of free user contexts
12579 * num_send_contexts - number of PIO send contexts being used
12580 */
12581static int set_up_context_variables(struct hfi1_devdata *dd)
12582{
12583 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012584 int total_contexts;
12585 int ret;
12586 unsigned ngroups;
12587
12588 /*
12589 * Kernel contexts: (to be fixed later):
12590	 * - min of 2 or 1 context/numa
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012591 * - Context 0 - control context (VL15/multicast/error)
12592 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012593 */
12594 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012595 /*
12596 * Don't count context 0 in n_krcvqs since
12597		 * it isn't used for normal verbs traffic.
12598 *
12599 * krcvqs will reflect number of kernel
12600 * receive contexts above 0.
12601 */
12602 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012603 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012604 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012605 num_kernel_contexts =
12606 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12607 /*
12608 * Every kernel receive context needs an ACK send context.
12609 * one send context is allocated for each VL{0-7} and VL15
12610 */
12611 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12612 dd_dev_err(dd,
12613 "Reducing # kernel rcv contexts to: %d, from %d\n",
12614 (int)(dd->chip_send_contexts - num_vls - 1),
12615 (int)num_kernel_contexts);
12616 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12617 }
12618 /*
12619 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012620 * - default to 1 user context per CPU if num_user_contexts is
12621 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012622 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012623 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012624 num_user_contexts = num_online_cpus();
12625
12626 total_contexts = num_kernel_contexts + num_user_contexts;
12627
12628 /*
12629 * Adjust the counts given a global max.
12630 */
12631 if (total_contexts > dd->chip_rcv_contexts) {
12632 dd_dev_err(dd,
12633 "Reducing # user receive contexts to: %d, from %d\n",
12634 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12635 (int)num_user_contexts);
12636 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12637 /* recalculate */
12638 total_contexts = num_kernel_contexts + num_user_contexts;
12639 }
12640
12641 /* the first N are kernel contexts, the rest are user contexts */
12642 dd->num_rcv_contexts = total_contexts;
12643 dd->n_krcv_queues = num_kernel_contexts;
12644 dd->first_user_ctxt = num_kernel_contexts;
12645 dd->freectxts = num_user_contexts;
12646 dd_dev_info(dd,
12647 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12648 (int)dd->chip_rcv_contexts,
12649 (int)dd->num_rcv_contexts,
12650 (int)dd->n_krcv_queues,
12651 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12652
12653 /*
12654 * Receive array allocation:
12655 * All RcvArray entries are divided into groups of 8. This
12656 * is required by the hardware and will speed up writes to
12657 * consecutive entries by using write-combining of the entire
12658 * cacheline.
12659 *
12660	 * The groups are evenly divided among all contexts.
12661	 * Any left-over groups will be given to the first N user
12662	 * contexts.
12663 */
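	/*
	 * Arithmetic sketch with assumed numbers: a group size of 8,
	 * chip_rcv_array_count = 32768 and 40 receive contexts give
	 *
	 *	ngroups     = 32768 / 8       = 4096
	 *	per context = 4096 / 40       = 102 groups
	 *	nctxt_extra = 4096 - 40 * 102 = 16 groups
	 *
	 * and those 16 extra groups go to the first 16 user contexts.
	 */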
12664 dd->rcv_entries.group_size = RCV_INCREMENT;
12665 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12666 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12667 dd->rcv_entries.nctxt_extra = ngroups -
12668 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12669 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12670 dd->rcv_entries.ngroups,
12671 dd->rcv_entries.nctxt_extra);
12672 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12673 MAX_EAGER_ENTRIES * 2) {
12674 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12675 dd->rcv_entries.group_size;
12676 dd_dev_info(dd,
12677 "RcvArray group count too high, change to %u\n",
12678 dd->rcv_entries.ngroups);
12679 dd->rcv_entries.nctxt_extra = 0;
12680 }
12681 /*
12682 * PIO send contexts
12683 */
12684 ret = init_sc_pools_and_sizes(dd);
12685 if (ret >= 0) { /* success */
12686 dd->num_send_contexts = ret;
12687 dd_dev_info(
12688 dd,
12689 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12690 dd->chip_send_contexts,
12691 dd->num_send_contexts,
12692 dd->sc_sizes[SC_KERNEL].count,
12693 dd->sc_sizes[SC_ACK].count,
12694 dd->sc_sizes[SC_USER].count);
12695 ret = 0; /* success */
12696 }
12697
12698 return ret;
12699}
12700
12701/*
12702 * Set the device/port partition key table. The MAD code
12703 * will ensure that, at least, the partial management
12704 * partition key is present in the table.
12705 */
12706static void set_partition_keys(struct hfi1_pportdata *ppd)
12707{
12708 struct hfi1_devdata *dd = ppd->dd;
12709 u64 reg = 0;
12710 int i;
12711
12712 dd_dev_info(dd, "Setting partition keys\n");
12713 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12714 reg |= (ppd->pkeys[i] &
12715 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12716 ((i % 4) *
12717 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12718 /* Each register holds 4 PKey values. */
12719 if ((i % 4) == 3) {
12720 write_csr(dd, RCV_PARTITION_KEY +
12721 ((i - 3) * 2), reg);
12722 reg = 0;
12723 }
12724 }
12725
12726 /* Always enable HW pkeys check when pkeys table is set */
12727 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12728}
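/*
 * Packing sketch for the loop above, assuming the per-pkey field shift is
 * 16 bits (four 16-bit pkeys per 64-bit register) and illustrative values:
 *
 *	i = 0..3, pkeys = { 0xffff, 0x7fff, 0x0001, 0x0002 }
 *	reg = 0xffff | (0x7fff << 16) | (0x0001 << 32) | (0x0002 << 48)
 *	    = 0x000200017fffffff
 *	written to RCV_PARTITION_KEY + ((3 - 3) * 2) once (i % 4) == 3
 */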
12729
12730/*
12731 * These CSRs and memories are uninitialized on reset and must be
12732 * written before reading to set the ECC/parity bits.
12733 *
12734 * NOTE: All user context CSRs that are not mmaped write-only
12735 * (e.g. the TID flows) must be initialized even if the driver never
12736 * reads them.
12737 */
12738static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12739{
12740 int i, j;
12741
12742 /* CceIntMap */
12743 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12744 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12745
12746 /* SendCtxtCreditReturnAddr */
12747 for (i = 0; i < dd->chip_send_contexts; i++)
12748 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12749
12750 /* PIO Send buffers */
12751 /* SDMA Send buffers */
12752 /* These are not normally read, and (presently) have no method
12753 to be read, so are not pre-initialized */
12754
12755 /* RcvHdrAddr */
12756 /* RcvHdrTailAddr */
12757 /* RcvTidFlowTable */
12758 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12759 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12760 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12761 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12762 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12763 }
12764
12765 /* RcvArray */
12766 for (i = 0; i < dd->chip_rcv_array_count; i++)
12767 write_csr(dd, RCV_ARRAY + (8*i),
12768 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12769
12770 /* RcvQPMapTable */
12771 for (i = 0; i < 32; i++)
12772 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12773}
12774
12775/*
12776 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12777 */
12778static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12779 u64 ctrl_bits)
12780{
12781 unsigned long timeout;
12782 u64 reg;
12783
12784 /* is the condition present? */
12785 reg = read_csr(dd, CCE_STATUS);
12786 if ((reg & status_bits) == 0)
12787 return;
12788
12789 /* clear the condition */
12790 write_csr(dd, CCE_CTRL, ctrl_bits);
12791
12792 /* wait for the condition to clear */
12793 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12794 while (1) {
12795 reg = read_csr(dd, CCE_STATUS);
12796 if ((reg & status_bits) == 0)
12797 return;
12798 if (time_after(jiffies, timeout)) {
12799 dd_dev_err(dd,
12800 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12801 status_bits, reg & status_bits);
12802 return;
12803 }
12804 udelay(1);
12805 }
12806}
12807
12808/* set CCE CSRs to chip reset defaults */
12809static void reset_cce_csrs(struct hfi1_devdata *dd)
12810{
12811 int i;
12812
12813 /* CCE_REVISION read-only */
12814 /* CCE_REVISION2 read-only */
12815 /* CCE_CTRL - bits clear automatically */
12816 /* CCE_STATUS read-only, use CceCtrl to clear */
12817 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12818 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12819 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12820 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12821 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12822 /* CCE_ERR_STATUS read-only */
12823 write_csr(dd, CCE_ERR_MASK, 0);
12824 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12825 /* CCE_ERR_FORCE leave alone */
12826 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12827 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12828 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12829 /* CCE_PCIE_CTRL leave alone */
12830 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12831 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12832 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12833 CCE_MSIX_TABLE_UPPER_RESETCSR);
12834 }
12835 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12836 /* CCE_MSIX_PBA read-only */
12837 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12838 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12839 }
12840 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12841 write_csr(dd, CCE_INT_MAP, 0);
12842 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12843 /* CCE_INT_STATUS read-only */
12844 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12845 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12846 /* CCE_INT_FORCE leave alone */
12847 /* CCE_INT_BLOCKED read-only */
12848 }
12849 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12850 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12851}
12852
12853/* set ASIC CSRs to chip reset defaults */
12854static void reset_asic_csrs(struct hfi1_devdata *dd)
12855{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012856 int i;
12857
12858 /*
12859 * If the HFIs are shared between separate nodes or VMs,
12860 * then more will need to be done here. One idea is a module
12861 * parameter that returns early, letting the first power-on or
12862 * a known first load do the reset and blocking all others.
12863 */
12864
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012865 if (!(dd->flags & HFI1_DO_INIT_ASIC))
12866 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012867
12868 if (dd->icode != ICODE_FPGA_EMULATION) {
12869 /* emulation does not have an SBus - leave these alone */
12870 /*
12871 * All writes to ASIC_CFG_SBUS_REQUEST do something.
12872 * Notes:
12873 * o The reset is not zero if aimed at the core. See the
12874 * SBus documentation for details.
12875 * o If the SBus firmware has been updated (e.g. by the BIOS),
12876 * will the reset revert that?
12877 */
12878 /* ASIC_CFG_SBUS_REQUEST leave alone */
12879 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12880 }
12881 /* ASIC_SBUS_RESULT read-only */
12882 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12883 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12884 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12885 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012886
12887 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012888 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040012889
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050012890 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012891 /* ASIC_STS_THERM read-only */
12892 /* ASIC_CFG_RESET leave alone */
12893
12894 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12895 /* ASIC_PCIE_SD_HOST_STATUS read-only */
12896 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12897 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12898 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12899 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12900 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12901 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12902 for (i = 0; i < 16; i++)
12903 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12904
12905 /* ASIC_GPIO_IN read-only */
12906 write_csr(dd, ASIC_GPIO_OE, 0);
12907 write_csr(dd, ASIC_GPIO_INVERT, 0);
12908 write_csr(dd, ASIC_GPIO_OUT, 0);
12909 write_csr(dd, ASIC_GPIO_MASK, 0);
12910 /* ASIC_GPIO_STATUS read-only */
12911 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12912 /* ASIC_GPIO_FORCE leave alone */
12913
12914 /* ASIC_QSFP1_IN read-only */
12915 write_csr(dd, ASIC_QSFP1_OE, 0);
12916 write_csr(dd, ASIC_QSFP1_INVERT, 0);
12917 write_csr(dd, ASIC_QSFP1_OUT, 0);
12918 write_csr(dd, ASIC_QSFP1_MASK, 0);
12919 /* ASIC_QSFP1_STATUS read-only */
12920 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
12921 /* ASIC_QSFP1_FORCE leave alone */
12922
12923 /* ASIC_QSFP2_IN read-only */
12924 write_csr(dd, ASIC_QSFP2_OE, 0);
12925 write_csr(dd, ASIC_QSFP2_INVERT, 0);
12926 write_csr(dd, ASIC_QSFP2_OUT, 0);
12927 write_csr(dd, ASIC_QSFP2_MASK, 0);
12928 /* ASIC_QSFP2_STATUS read-only */
12929 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
12930 /* ASIC_QSFP2_FORCE leave alone */
12931
12932 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
12933 /* this also writes a NOP command, clearing paging mode */
12934 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
12935 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012936}
12937
12938/* set MISC CSRs to chip reset defaults */
12939static void reset_misc_csrs(struct hfi1_devdata *dd)
12940{
12941 int i;
12942
12943 for (i = 0; i < 32; i++) {
12944 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
12945 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
12946 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
12947 }
12948 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
12949	   only be written in 128-byte chunks */
12950 /* init RSA engine to clear lingering errors */
12951 write_csr(dd, MISC_CFG_RSA_CMD, 1);
12952 write_csr(dd, MISC_CFG_RSA_MU, 0);
12953 write_csr(dd, MISC_CFG_FW_CTRL, 0);
12954 /* MISC_STS_8051_DIGEST read-only */
12955 /* MISC_STS_SBM_DIGEST read-only */
12956 /* MISC_STS_PCIE_DIGEST read-only */
12957 /* MISC_STS_FAB_DIGEST read-only */
12958 /* MISC_ERR_STATUS read-only */
12959 write_csr(dd, MISC_ERR_MASK, 0);
12960 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
12961 /* MISC_ERR_FORCE leave alone */
12962}
12963
12964/* set TXE CSRs to chip reset defaults */
12965static void reset_txe_csrs(struct hfi1_devdata *dd)
12966{
12967 int i;
12968
12969 /*
12970 * TXE Kernel CSRs
12971 */
12972 write_csr(dd, SEND_CTRL, 0);
12973 __cm_reset(dd, 0); /* reset CM internal state */
12974 /* SEND_CONTEXTS read-only */
12975 /* SEND_DMA_ENGINES read-only */
12976 /* SEND_PIO_MEM_SIZE read-only */
12977 /* SEND_DMA_MEM_SIZE read-only */
12978 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
12979 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
12980 /* SEND_PIO_ERR_STATUS read-only */
12981 write_csr(dd, SEND_PIO_ERR_MASK, 0);
12982 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
12983 /* SEND_PIO_ERR_FORCE leave alone */
12984 /* SEND_DMA_ERR_STATUS read-only */
12985 write_csr(dd, SEND_DMA_ERR_MASK, 0);
12986 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
12987 /* SEND_DMA_ERR_FORCE leave alone */
12988 /* SEND_EGRESS_ERR_STATUS read-only */
12989 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
12990 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
12991 /* SEND_EGRESS_ERR_FORCE leave alone */
12992 write_csr(dd, SEND_BTH_QP, 0);
12993 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
12994 write_csr(dd, SEND_SC2VLT0, 0);
12995 write_csr(dd, SEND_SC2VLT1, 0);
12996 write_csr(dd, SEND_SC2VLT2, 0);
12997 write_csr(dd, SEND_SC2VLT3, 0);
12998 write_csr(dd, SEND_LEN_CHECK0, 0);
12999 write_csr(dd, SEND_LEN_CHECK1, 0);
13000 /* SEND_ERR_STATUS read-only */
13001 write_csr(dd, SEND_ERR_MASK, 0);
13002 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13003 /* SEND_ERR_FORCE read-only */
13004 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13005 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
13006 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13007 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
13008 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
13009 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
13010 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13011 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
13012 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13013 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
13014 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13015 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13016 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13017 /* SEND_CM_CREDIT_USED_STATUS read-only */
13018 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13019 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13020 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13021 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13022 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13023 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13024 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
13025 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13026 /* SEND_CM_CREDIT_USED_VL read-only */
13027 /* SEND_CM_CREDIT_USED_VL15 read-only */
13028 /* SEND_EGRESS_CTXT_STATUS read-only */
13029 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13030 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13031 /* SEND_EGRESS_ERR_INFO read-only */
13032 /* SEND_EGRESS_ERR_SOURCE read-only */
13033
13034 /*
13035 * TXE Per-Context CSRs
13036 */
13037 for (i = 0; i < dd->chip_send_contexts; i++) {
13038 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13039 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13040 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13041 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13042 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13043 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13044 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13045 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13046 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13047 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13048 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13049 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13050 }
13051
13052 /*
13053 * TXE Per-SDMA CSRs
13054 */
13055 for (i = 0; i < dd->chip_sdma_engines; i++) {
13056 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13057 /* SEND_DMA_STATUS read-only */
13058 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13059 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13060 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13061 /* SEND_DMA_HEAD read-only */
13062 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13063 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13064 /* SEND_DMA_IDLE_CNT read-only */
13065 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13066 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13067 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13068 /* SEND_DMA_ENG_ERR_STATUS read-only */
13069 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13070 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13071 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13072 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13073 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13074 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13075 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13076 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13077 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13078 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13079 }
13080}
13081
13082/*
13083 * Expect on entry:
13084 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13085 */
13086static void init_rbufs(struct hfi1_devdata *dd)
13087{
13088 u64 reg;
13089 int count;
13090
13091 /*
13092 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13093 * clear.
13094 */
13095 count = 0;
13096 while (1) {
13097 reg = read_csr(dd, RCV_STATUS);
13098 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13099 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13100 break;
13101 /*
13102 * Give up after 1ms - maximum wait time.
13103 *
13104 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13105 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13106 * 148 KB / (66% * 250MB/s) = 920us
13107 */
13108 if (count++ > 500) {
13109 dd_dev_err(dd,
13110 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13111 __func__, reg);
13112 break;
13113 }
13114 udelay(2); /* do not busy-wait the CSR */
13115 }
13116
13117 /* start the init - expect RcvCtrl to be 0 */
13118 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13119
13120 /*
13121 * Read to force the write of Rcvtrl.RxRbufInit. There is a brief
13122 * period after the write before RcvStatus.RxRbufInitDone is valid.
13123 * The delay in the first run through the loop below is sufficient and
13124	 * required before the first read of RcvStatus.RxRbufInitDone.
13125 */
13126 read_csr(dd, RCV_CTRL);
13127
13128 /* wait for the init to finish */
13129 count = 0;
13130 while (1) {
13131 /* delay is required first time through - see above */
13132 udelay(2); /* do not busy-wait the CSR */
13133 reg = read_csr(dd, RCV_STATUS);
13134 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13135 break;
13136
13137 /* give up after 100us - slowest possible at 33MHz is 73us */
13138 if (count++ > 50) {
13139 dd_dev_err(dd,
13140 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13141 __func__);
13142 break;
13143 }
13144 }
13145}
13146
13147/* set RXE CSRs to chip reset defaults */
13148static void reset_rxe_csrs(struct hfi1_devdata *dd)
13149{
13150 int i, j;
13151
13152 /*
13153 * RXE Kernel CSRs
13154 */
13155 write_csr(dd, RCV_CTRL, 0);
13156 init_rbufs(dd);
13157 /* RCV_STATUS read-only */
13158 /* RCV_CONTEXTS read-only */
13159 /* RCV_ARRAY_CNT read-only */
13160 /* RCV_BUF_SIZE read-only */
13161 write_csr(dd, RCV_BTH_QP, 0);
13162 write_csr(dd, RCV_MULTICAST, 0);
13163 write_csr(dd, RCV_BYPASS, 0);
13164 write_csr(dd, RCV_VL15, 0);
13165 /* this is a clear-down */
13166 write_csr(dd, RCV_ERR_INFO,
13167 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13168 /* RCV_ERR_STATUS read-only */
13169 write_csr(dd, RCV_ERR_MASK, 0);
13170 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13171 /* RCV_ERR_FORCE leave alone */
13172 for (i = 0; i < 32; i++)
13173 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13174 for (i = 0; i < 4; i++)
13175 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13176 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13177 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13178 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13179 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13180 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13181 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13182 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13183 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13184 }
13185 for (i = 0; i < 32; i++)
13186 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13187
13188 /*
13189 * RXE Kernel and User Per-Context CSRs
13190 */
13191 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13192 /* kernel */
13193 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13194 /* RCV_CTXT_STATUS read-only */
13195 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13196 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13197 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13198 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13199 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13200 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13201 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13202 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13203 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13204 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13205
13206 /* user */
13207 /* RCV_HDR_TAIL read-only */
13208 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13209 /* RCV_EGR_INDEX_TAIL read-only */
13210 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13211 /* RCV_EGR_OFFSET_TAIL read-only */
13212 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13213 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13214 0);
13215 }
13216 }
13217}
13218
13219/*
13220 * Set sc2vl tables.
13221 *
13222 * They power on to zeros, so to avoid send context errors
13223 * they need to be set:
13224 *
13225 * SC 0-7 -> VL 0-7 (respectively)
13226 * SC 15 -> VL 15
13227 * otherwise
13228 * -> VL 0
13229 */
13230static void init_sc2vl_tables(struct hfi1_devdata *dd)
13231{
13232 int i;
13233 /* init per architecture spec, constrained by hardware capability */
13234
13235 /* HFI maps sent packets */
13236 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13237 0,
13238 0, 0, 1, 1,
13239 2, 2, 3, 3,
13240 4, 4, 5, 5,
13241 6, 6, 7, 7));
13242 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13243 1,
13244 8, 0, 9, 0,
13245 10, 0, 11, 0,
13246 12, 0, 13, 0,
13247 14, 0, 15, 15));
13248 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13249 2,
13250 16, 0, 17, 0,
13251 18, 0, 19, 0,
13252 20, 0, 21, 0,
13253 22, 0, 23, 0));
13254 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13255 3,
13256 24, 0, 25, 0,
13257 26, 0, 27, 0,
13258 28, 0, 29, 0,
13259 30, 0, 31, 0));
13260
13261 /* DC maps received packets */
13262 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13263 15_0,
13264 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13265 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13266 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13267 31_16,
13268 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13269 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13270
13271 /* initialize the cached sc2vl values consistently with h/w */
13272 for (i = 0; i < 32; i++) {
13273 if (i < 8 || i == 15)
13274 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13275 else
13276 *((u8 *)(dd->sc2vl) + i) = 0;
13277 }
13278}
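/*
 * Resulting cached table, for reference (one byte per SC, matching the
 * CSR programming above):
 *
 *	sc2vl[0..7] = 0..7, sc2vl[15] = 15, all other entries = 0
 */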
13279
13280/*
13281 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13282 * depend on the chip going through a power-on reset - a driver may be loaded
13283 * and unloaded many times.
13284 *
13285 * Do not write any CSR values to the chip in this routine - there may be
13286 * a reset following the (possible) FLR in this routine.
13287 *
13288 */
13289static void init_chip(struct hfi1_devdata *dd)
13290{
13291 int i;
13292
13293 /*
13294 * Put the HFI CSRs in a known state.
13295 * Combine this with a DC reset.
13296 *
13297 * Stop the device from doing anything while we do a
13298 * reset. We know there are no other active users of
13299	 * the device since we are now in charge.  Turn off
13300	 * all outbound and inbound traffic and make sure
13301 * the device does not generate any interrupts.
13302 */
13303
13304 /* disable send contexts and SDMA engines */
13305 write_csr(dd, SEND_CTRL, 0);
13306 for (i = 0; i < dd->chip_send_contexts; i++)
13307 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13308 for (i = 0; i < dd->chip_sdma_engines; i++)
13309 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13310 /* disable port (turn off RXE inbound traffic) and contexts */
13311 write_csr(dd, RCV_CTRL, 0);
13312 for (i = 0; i < dd->chip_rcv_contexts; i++)
13313		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13314 /* mask all interrupt sources */
13315 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13316 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13317
13318 /*
13319 * DC Reset: do a full DC reset before the register clear.
13320 * A recommended length of time to hold is one CSR read,
13321 * so reread the CceDcCtrl. Then, hold the DC in reset
13322 * across the clear.
13323 */
13324 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13325 (void) read_csr(dd, CCE_DC_CTRL);
13326
13327 if (use_flr) {
13328 /*
13329 * A FLR will reset the SPC core and part of the PCIe.
13330 * The parts that need to be restored have already been
13331 * saved.
13332 */
13333 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13334
13335 /* do the FLR, the DC reset will remain */
13336 hfi1_pcie_flr(dd);
13337
13338 /* restore command and BARs */
13339 restore_pci_variables(dd);
13340
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013341 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013342 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13343 hfi1_pcie_flr(dd);
13344 restore_pci_variables(dd);
13345 }
13346
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013347 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013348 } else {
13349 dd_dev_info(dd, "Resetting CSRs with writes\n");
13350 reset_cce_csrs(dd);
13351 reset_txe_csrs(dd);
13352 reset_rxe_csrs(dd);
13353 reset_asic_csrs(dd);
13354 reset_misc_csrs(dd);
13355 }
13356 /* clear the DC reset */
13357 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013358
Mike Marciniszyn77241052015-07-30 15:17:43 -040013359 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013360 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013361 setextled(dd, 0);
13362 /*
13363 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013364 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013365 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013366 * anything plugged constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013367 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013368 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013369 * I2CCLK and I2CDAT will change per direction, and INT_N and
13370 * MODPRS_N are input only and their value is ignored.
13371 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013372 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13373 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013374}
13375
13376static void init_early_variables(struct hfi1_devdata *dd)
13377{
13378 int i;
13379
13380 /* assign link credit variables */
13381 dd->vau = CM_VAU;
13382 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013383 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013384 dd->link_credits--;
13385 dd->vcu = cu_to_vcu(hfi1_cu);
13386 /* enough room for 8 MAD packets plus header - 17K */
13387 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13388 if (dd->vl15_init > dd->link_credits)
13389 dd->vl15_init = dd->link_credits;
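	/*
	 * Worked example of the computation above (assuming CM_VAU selects
	 * a 64 byte allocation unit): 8 * (2048 + 128) = 17408 bytes, and
	 * 17408 / 64 = 272 AUs of initial VL15 credit, capped at the
	 * global credit limit.
	 */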
13390
13391 write_uninitialized_csrs_and_memories(dd);
13392
13393 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13394 for (i = 0; i < dd->num_pports; i++) {
13395 struct hfi1_pportdata *ppd = &dd->pport[i];
13396
13397 set_partition_keys(ppd);
13398 }
13399 init_sc2vl_tables(dd);
13400}
13401
13402static void init_kdeth_qp(struct hfi1_devdata *dd)
13403{
13404 /* user changed the KDETH_QP */
13405 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13406 /* out of range or illegal value */
13407 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13408 kdeth_qp = 0;
13409 }
13410 if (kdeth_qp == 0) /* not set, or failed range check */
13411 kdeth_qp = DEFAULT_KDETH_QP;
13412
13413 write_csr(dd, SEND_BTH_QP,
13414 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13415 << SEND_BTH_QP_KDETH_QP_SHIFT);
13416
13417 write_csr(dd, RCV_BTH_QP,
13418 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13419 << RCV_BTH_QP_KDETH_QP_SHIFT);
13420}
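/*
 * Illustrative sketch (an assumption, not taken from the register spec:
 * the prefix is read here as occupying QPN bits 23:16): a packet is
 * classified as KDETH when the upper byte of its BTH destination QP
 * matches the programmed prefix, roughly
 *
 *	is_kdeth = ((qp_num >> 16) & 0xff) == kdeth_qp;
 *
 * SEND_BTH_QP and RCV_BTH_QP are written with the same value so the
 * send and receive sides classify consistently.
 */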
13421
13422/**
13423 * init_qpmap_table
13424 * @dd - device data
13425 * @first_ctxt - first context
13426 * @last_ctxt - last context
13427 *
13428 * This routine sets the qpn mapping table that
13429 * is indexed by qpn[8:1].
13430 *
13431 * The routine will round robin the 256 settings
13432 * from first_ctxt to last_ctxt.
13433 *
13434 * The first/last looks ahead to having specialized
13435 * receive contexts for mgmt and bypass. Normal
13436 * verbs traffic will be assumed to be on a range
13437 * of receive contexts.
13438 */
13439static void init_qpmap_table(struct hfi1_devdata *dd,
13440 u32 first_ctxt,
13441 u32 last_ctxt)
13442{
13443 u64 reg = 0;
13444 u64 regno = RCV_QP_MAP_TABLE;
13445 int i;
13446 u64 ctxt = first_ctxt;
13447
13448 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013449 reg |= ctxt << (8 * (i % 8));
13450 i++;
13451 ctxt++;
13452 if (ctxt > last_ctxt)
13453 ctxt = first_ctxt;
13454 if (i % 8 == 0) {
13455 write_csr(dd, regno, reg);
13456 reg = 0;
13457 regno += 8;
13458 }
13459 }
13460 if (i % 8)
13461 write_csr(dd, regno, reg);
13462
13463 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13464 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13465}
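/*
 * Worked example of the mapping built above (a sketch, not chip output):
 * with first_ctxt = 2 and last_ctxt = 4, the 256 byte-wide entries are
 * filled round robin as 2, 3, 4, 2, 3, 4, ... and packed 8 entries per
 * 64-bit RCV_QP_MAP_TABLE CSR, so the first register holds
 *
 *	0x0302040302040302	(entry 0 in bits 7:0, entry 7 in bits 63:56)
 *
 * and a QP with qpn[8:1] == 5 is steered to entry 5, i.e. context 4.
 */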
13466
13467/**
13468 * init_qos - init RX qos
13469 * @dd - device data
13470 * @first_ctxt - first receive context to use
13471 *
13472 * This routine initializes Rule 0 and the
13473 * RSM map table to implement qos.
13474 *
13475 * If all of the limit tests succeed,
13476 * qos is applied based on the array
13477 * interpretation of krcvqs where
13478 * entry 0 is VL0.
13479 *
13480 * The number of vl bits (n) and the number of qpn
13481 * bits (m) are computed to feed both the RSM map table
13482 * and the single rule.
13483 *
13484 */
13485static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13486{
13487 u8 max_by_vl = 0;
13488 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13489 u64 *rsmmap;
13490 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013491 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013492
13493 /* validate */
13494 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13495 num_vls == 1 ||
13496 krcvqsset <= 1)
13497 goto bail;
13498 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13499 if (krcvqs[i] > max_by_vl)
13500 max_by_vl = krcvqs[i];
13501 if (max_by_vl > 32)
13502 goto bail;
13503 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13504 /* determine bits vl */
13505 n = ilog2(num_vls);
13506 /* determine bits for qpn */
13507 m = ilog2(qpns_per_vl);
13508 if ((m + n) > 7)
13509 goto bail;
13510 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13511 goto bail;
13512 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013513 if (!rsmmap)
13514 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013515 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13516 /* init the local copy of the table */
13517 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13518 unsigned tctxt;
13519
13520 for (qpn = 0, tctxt = ctxt;
13521 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13522 unsigned idx, regoff, regidx;
13523
13524 /* generate index <= 128 */
13525 idx = (qpn << n) ^ i;
13526 regoff = (idx % 8) * 8;
13527 regidx = idx / 8;
13528 reg = rsmmap[regidx];
13529 /* replace 0xff with context number */
13530 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13531 << regoff);
13532 reg |= (u64)(tctxt++) << regoff;
13533 rsmmap[regidx] = reg;
13534 if (tctxt == ctxt + krcvqs[i])
13535 tctxt = ctxt;
13536 }
13537 ctxt += krcvqs[i];
13538 }
13539 /* flush cached copies to chip */
13540 for (i = 0; i < NUM_MAP_REGS; i++)
13541 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13542 /* add rule0 */
13543 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13544 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13545 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13546 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13547 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13548 LRH_BTH_MATCH_OFFSET
13549 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13550 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13551 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13552 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13553 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13554 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13555 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13556 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13557 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13558 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13559 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13560 /* Enable RSM */
13561 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13562 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013563 /* map everything else to first context */
13564 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013565 dd->qos_shift = n + 1;
13566 return;
13567bail:
13568 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013569 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013570}
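/*
 * Worked example of the RSM sizing above (illustrative values only):
 * with num_vls = 8 and krcvqs = {2, 2, 2, 2, 2, 2, 2, 2}, max_by_vl = 2,
 * so qpns_per_vl = 2, n = ilog2(8) = 3 VL bits and m = ilog2(2) = 1 QPN
 * bit; m + n = 4 <= 7 and 8 * 2 = 16 receive contexts are needed.  The
 * local map table is filled at index (qpn << 3) ^ vl, giving 16 used
 * entries, and dd->qos_shift becomes n + 1 = 4.
 */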
13571
13572static void init_rxe(struct hfi1_devdata *dd)
13573{
13574 /* enable all receive errors */
13575 write_csr(dd, RCV_ERR_MASK, ~0ull);
13576 /* setup QPN map table - start where VL15 context leaves off */
13577 init_qos(
13578 dd,
13579 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13580 /*
13581 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13582 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13583 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13584 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13585 * Max_PayLoad_Size set to its minimum of 128.
13586 *
13587 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13588 * (64 bytes). Max_Payload_Size is possibly modified upward in
13589 * tune_pcie_caps() which is called after this routine.
13590 */
13591}
13592
13593static void init_other(struct hfi1_devdata *dd)
13594{
13595 /* enable all CCE errors */
13596 write_csr(dd, CCE_ERR_MASK, ~0ull);
13597 /* enable *some* Misc errors */
13598 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13599 /* enable all DC errors, except LCB */
13600 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13601 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13602}
13603
13604/*
13605 * Fill out the given AU table using the given CU.  A CU is defined in terms
13606 * of AUs.  The table is an encoding: given the index, how many AUs does that
13607 * represent?
13608 *
13609 * NOTE: Assumes that the register layout is the same for the
13610 * local and remote tables.
13611 */
13612static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13613 u32 csr0to3, u32 csr4to7)
13614{
13615 write_csr(dd, csr0to3,
13616 0ull <<
13617 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13618 | 1ull <<
13619 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13620 | 2ull * cu <<
13621 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13622 | 4ull * cu <<
13623 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13624 write_csr(dd, csr4to7,
13625 8ull * cu <<
13626 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13627 | 16ull * cu <<
13628 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13629 | 32ull * cu <<
13630 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13631 | 64ull * cu <<
13632 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13633
13634}
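/*
 * For reference, the encoding written above: table entry k holds the
 * number of AUs that index k represents.  With cu = 1 (one AU per CU)
 * the eight entries are 0, 1, 2, 4, 8, 16, 32 and 64 AUs; a larger CU
 * simply scales entries 2 through 7.
 */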
13635
13636static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13637{
13638 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13639 SEND_CM_LOCAL_AU_TABLE4_TO7);
13640}
13641
13642void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13643{
13644 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13645 SEND_CM_REMOTE_AU_TABLE4_TO7);
13646}
13647
13648static void init_txe(struct hfi1_devdata *dd)
13649{
13650 int i;
13651
13652 /* enable all PIO, SDMA, general, and Egress errors */
13653 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13654 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13655 write_csr(dd, SEND_ERR_MASK, ~0ull);
13656 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13657
13658 /* enable all per-context and per-SDMA engine errors */
13659 for (i = 0; i < dd->chip_send_contexts; i++)
13660 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13661 for (i = 0; i < dd->chip_sdma_engines; i++)
13662 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13663
13664 /* set the local CU to AU mapping */
13665 assign_local_cm_au_table(dd, dd->vcu);
13666
13667 /*
13668 * Set reasonable default for Credit Return Timer
13669 * Don't set on Simulator - causes it to choke.
13670 */
13671 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13672 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13673}
13674
13675int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13676{
13677 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13678 unsigned sctxt;
13679 int ret = 0;
13680 u64 reg;
13681
13682 if (!rcd || !rcd->sc) {
13683 ret = -EINVAL;
13684 goto done;
13685 }
13686 sctxt = rcd->sc->hw_context;
13687 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13688 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13689 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13690 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13691 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13692 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13693 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13694 /*
13695 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013696 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013697 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013698 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13699 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13700 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13701 }
13702
13703 /* Enable J_KEY check on receive context. */
13704 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13705 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13706 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13707 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13708done:
13709 return ret;
13710}
13711
13712int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13713{
13714 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13715 unsigned sctxt;
13716 int ret = 0;
13717 u64 reg;
13718
13719 if (!rcd || !rcd->sc) {
13720 ret = -EINVAL;
13721 goto done;
13722 }
13723 sctxt = rcd->sc->hw_context;
13724 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13725 /*
13726 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13727 * This check would not have been enabled for A0 h/w, see
13728 * set_ctxt_jkey().
13729 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013730 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013731 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13732 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13733 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13734 }
13735 /* Turn off the J_KEY on the receive side */
13736 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13737done:
13738 return ret;
13739}
13740
13741int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13742{
13743 struct hfi1_ctxtdata *rcd;
13744 unsigned sctxt;
13745 int ret = 0;
13746 u64 reg;
13747
13748 if (ctxt < dd->num_rcv_contexts)
13749 rcd = dd->rcd[ctxt];
13750 else {
13751 ret = -EINVAL;
13752 goto done;
13753 }
13754 if (!rcd || !rcd->sc) {
13755 ret = -EINVAL;
13756 goto done;
13757 }
13758 sctxt = rcd->sc->hw_context;
13759 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13760 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13761 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13762 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13763 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13764 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13765done:
13766 return ret;
13767}
13768
13769int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13770{
13771 struct hfi1_ctxtdata *rcd;
13772 unsigned sctxt;
13773 int ret = 0;
13774 u64 reg;
13775
13776 if (ctxt < dd->num_rcv_contexts)
13777 rcd = dd->rcd[ctxt];
13778 else {
13779 ret = -EINVAL;
13780 goto done;
13781 }
13782 if (!rcd || !rcd->sc) {
13783 ret = -EINVAL;
13784 goto done;
13785 }
13786 sctxt = rcd->sc->hw_context;
13787 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13788 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13789 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13790 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13791done:
13792 return ret;
13793}
13794
13795/*
13796 * Start doing the clean up of the chip. Our clean up happens in multiple
13797 * stages and this is just the first.
13798 */
13799void hfi1_start_cleanup(struct hfi1_devdata *dd)
13800{
13801 free_cntrs(dd);
13802 free_rcverr(dd);
13803 clean_up_interrupts(dd);
13804}
13805
13806#define HFI_BASE_GUID(dev) \
13807 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13808
13809/*
13810 * Certain chip functions need to be initialized only once per asic
13811 * instead of per-device. This function finds the peer device and
13812 * checks whether that chip initialization needs to be done by this
13813 * device.
13814 */
13815static void asic_should_init(struct hfi1_devdata *dd)
13816{
13817 unsigned long flags;
13818 struct hfi1_devdata *tmp, *peer = NULL;
13819
13820 spin_lock_irqsave(&hfi1_devs_lock, flags);
13821 /* Find our peer device */
13822 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13823 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13824 dd->unit != tmp->unit) {
13825 peer = tmp;
13826 break;
13827 }
13828 }
13829
13830 /*
13831 * "Claim" the ASIC for initialization if it hasn't been
13832	 * "claimed" yet.
13833 */
13834 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13835 dd->flags |= HFI1_DO_INIT_ASIC;
13836 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13837}
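/*
 * Example of the pairing above: the two HFIs on one ASIC share a base
 * GUID that differs only in the GUID_HFI_INDEX_SHIFT bit, so
 * HFI_BASE_GUID() compares equal for peer devices while dd->unit still
 * tells them apart.  The first device through here therefore claims the
 * one-time ASIC initialization.
 */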
13838
Dean Luick5d9157a2015-11-16 21:59:34 -050013839/*
13840 * Set dd->boardname. Use a generic name if a name is not returned from
13841 * EFI variable space.
13842 *
13843 * Return 0 on success, -ENOMEM if space could not be allocated.
13844 */
13845static int obtain_boardname(struct hfi1_devdata *dd)
13846{
13847 /* generic board description */
13848 const char generic[] =
13849 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13850 unsigned long size;
13851 int ret;
13852
13853 ret = read_hfi1_efi_var(dd, "description", &size,
13854 (void **)&dd->boardname);
13855 if (ret) {
13856 dd_dev_err(dd, "Board description not found\n");
13857 /* use generic description */
13858 dd->boardname = kstrdup(generic, GFP_KERNEL);
13859 if (!dd->boardname)
13860 return -ENOMEM;
13861 }
13862 return 0;
13863}
13864
Mike Marciniszyn77241052015-07-30 15:17:43 -040013865/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013866 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013867 * @pdev: the pci_dev for hfi1_ib device
13868 * @ent: pci_device_id struct for this dev
13869 *
13870 * Also allocates, initializes, and returns the devdata struct for this
13871 * device instance
13872 *
13873 * This is global, and is called directly at init to set up the
13874 * chip-specific function pointers for later use.
13875 */
13876struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
13877 const struct pci_device_id *ent)
13878{
13879 struct hfi1_devdata *dd;
13880 struct hfi1_pportdata *ppd;
13881 u64 reg;
13882 int i, ret;
13883 static const char * const inames[] = { /* implementation names */
13884 "RTL silicon",
13885 "RTL VCS simulation",
13886 "RTL FPGA emulation",
13887 "Functional simulator"
13888 };
13889
13890 dd = hfi1_alloc_devdata(pdev,
13891 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
13892 if (IS_ERR(dd))
13893 goto bail;
13894 ppd = dd->pport;
13895 for (i = 0; i < dd->num_pports; i++, ppd++) {
13896 int vl;
13897 /* init common fields */
13898 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
13899 /* DC supports 4 link widths */
13900 ppd->link_width_supported =
13901 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
13902 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
13903 ppd->link_width_downgrade_supported =
13904 ppd->link_width_supported;
13905 /* start out enabling only 4X */
13906 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
13907 ppd->link_width_downgrade_enabled =
13908 ppd->link_width_downgrade_supported;
13909 /* link width active is 0 when link is down */
13910 /* link width downgrade active is 0 when link is down */
13911
13912 if (num_vls < HFI1_MIN_VLS_SUPPORTED
13913 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
13914 hfi1_early_err(&pdev->dev,
13915 "Invalid num_vls %u, using %u VLs\n",
13916 num_vls, HFI1_MAX_VLS_SUPPORTED);
13917 num_vls = HFI1_MAX_VLS_SUPPORTED;
13918 }
13919 ppd->vls_supported = num_vls;
13920 ppd->vls_operational = ppd->vls_supported;
13921 /* Set the default MTU. */
13922 for (vl = 0; vl < num_vls; vl++)
13923 dd->vld[vl].mtu = hfi1_max_mtu;
13924 dd->vld[15].mtu = MAX_MAD_PACKET;
13925 /*
13926 * Set the initial values to reasonable default, will be set
13927 * for real when link is up.
13928 */
13929 ppd->lstate = IB_PORT_DOWN;
13930 ppd->overrun_threshold = 0x4;
13931 ppd->phy_error_threshold = 0xf;
13932 ppd->port_crc_mode_enabled = link_crc_mask;
13933 /* initialize supported LTP CRC mode */
13934 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
13935 /* initialize enabled LTP CRC mode */
13936 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
13937 /* start in offline */
13938 ppd->host_link_state = HLS_DN_OFFLINE;
13939 init_vl_arb_caches(ppd);
13940 }
13941
13942 dd->link_default = HLS_DN_POLL;
13943
13944 /*
13945 * Do remaining PCIe setup and save PCIe values in dd.
13946 * Any error printing is already done by the init code.
13947 * On return, we have the chip mapped.
13948 */
13949 ret = hfi1_pcie_ddinit(dd, pdev, ent);
13950 if (ret < 0)
13951 goto bail_free;
13952
13953 /* verify that reads actually work, save revision for reset check */
13954 dd->revision = read_csr(dd, CCE_REVISION);
13955 if (dd->revision == ~(u64)0) {
13956 dd_dev_err(dd, "cannot read chip CSRs\n");
13957 ret = -EINVAL;
13958 goto bail_cleanup;
13959 }
13960 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
13961 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
13962 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
13963 & CCE_REVISION_CHIP_REV_MINOR_MASK;
13964
13965 /* obtain the hardware ID - NOT related to unit, which is a
13966 software enumeration */
13967 reg = read_csr(dd, CCE_REVISION2);
13968 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
13969 & CCE_REVISION2_HFI_ID_MASK;
13970 /* the variable size will remove unwanted bits */
13971 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
13972 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
13973 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
13974 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
13975 (int)dd->irev);
13976
13977 /* speeds the hardware can support */
13978 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
13979 /* speeds allowed to run at */
13980 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
13981 /* give a reasonable active value, will be set on link up */
13982 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
13983
13984 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
13985 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
13986 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
13987 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
13988 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
13989 /* fix up link widths for emulation _p */
13990 ppd = dd->pport;
13991 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
13992 ppd->link_width_supported =
13993 ppd->link_width_enabled =
13994 ppd->link_width_downgrade_supported =
13995 ppd->link_width_downgrade_enabled =
13996 OPA_LINK_WIDTH_1X;
13997 }
13998	/* ensure num_vls isn't larger than number of sdma engines */
13999 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14000 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014001 num_vls, dd->chip_sdma_engines);
14002 num_vls = dd->chip_sdma_engines;
14003 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014004 }
14005
14006 /*
14007 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14008 * Limit the max if larger than the field holds. If timeout is
14009 * non-zero, then the calculated field will be at least 1.
14010 *
14011 * Must be after icode is set up - the cclock rate depends
14012 * on knowing the hardware being used.
14013 */
14014 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14015 if (dd->rcv_intr_timeout_csr >
14016 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14017 dd->rcv_intr_timeout_csr =
14018 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14019 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14020 dd->rcv_intr_timeout_csr = 1;
14021
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014022 /* needs to be done before we look for the peer device */
14023 read_guid(dd);
14024
14025 /* should this device init the ASIC block? */
14026 asic_should_init(dd);
14027
Mike Marciniszyn77241052015-07-30 15:17:43 -040014028 /* obtain chip sizes, reset chip CSRs */
14029 init_chip(dd);
14030
14031 /* read in the PCIe link speed information */
14032 ret = pcie_speeds(dd);
14033 if (ret)
14034 goto bail_cleanup;
14035
Mike Marciniszyn77241052015-07-30 15:17:43 -040014036 /* read in firmware */
14037 ret = hfi1_firmware_init(dd);
14038 if (ret)
14039 goto bail_cleanup;
14040
14041 /*
14042 * In general, the PCIe Gen3 transition must occur after the
14043 * chip has been idled (so it won't initiate any PCIe transactions
14044 * e.g. an interrupt) and before the driver changes any registers
14045 * (the transition will reset the registers).
14046 *
14047 * In particular, place this call after:
14048 * - init_chip() - the chip will not initiate any PCIe transactions
14049 * - pcie_speeds() - reads the current link speed
14050 * - hfi1_firmware_init() - the needed firmware is ready to be
14051 * downloaded
14052 */
14053 ret = do_pcie_gen3_transition(dd);
14054 if (ret)
14055 goto bail_cleanup;
14056
14057 /* start setting dd values and adjusting CSRs */
14058 init_early_variables(dd);
14059
14060 parse_platform_config(dd);
14061
Dean Luick5d9157a2015-11-16 21:59:34 -050014062 ret = obtain_boardname(dd);
14063 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014064 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014065
14066 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014067 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014068 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014069 (u32)dd->majrev,
14070 (u32)dd->minrev,
14071 (dd->revision >> CCE_REVISION_SW_SHIFT)
14072 & CCE_REVISION_SW_MASK);
14073
14074 ret = set_up_context_variables(dd);
14075 if (ret)
14076 goto bail_cleanup;
14077
14078 /* set initial RXE CSRs */
14079 init_rxe(dd);
14080 /* set initial TXE CSRs */
14081 init_txe(dd);
14082 /* set initial non-RXE, non-TXE CSRs */
14083 init_other(dd);
14084 /* set up KDETH QP prefix in both RX and TX CSRs */
14085 init_kdeth_qp(dd);
14086
14087 /* send contexts must be set up before receive contexts */
14088 ret = init_send_contexts(dd);
14089 if (ret)
14090 goto bail_cleanup;
14091
14092 ret = hfi1_create_ctxts(dd);
14093 if (ret)
14094 goto bail_cleanup;
14095
14096 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14097 /*
14098 * rcd[0] is guaranteed to be valid by this point. Also, all
14099	 * contexts are using the same value, as per the module parameter.
14100 */
14101 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14102
14103 ret = init_pervl_scs(dd);
14104 if (ret)
14105 goto bail_cleanup;
14106
14107 /* sdma init */
14108 for (i = 0; i < dd->num_pports; ++i) {
14109 ret = sdma_init(dd, i);
14110 if (ret)
14111 goto bail_cleanup;
14112 }
14113
14114 /* use contexts created by hfi1_create_ctxts */
14115 ret = set_up_interrupts(dd);
14116 if (ret)
14117 goto bail_cleanup;
14118
14119 /* set up LCB access - must be after set_up_interrupts() */
14120 init_lcb_access(dd);
14121
14122 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14123 dd->base_guid & 0xFFFFFF);
14124
14125 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14126 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14127 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14128
14129 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14130 if (ret)
14131 goto bail_clear_intr;
14132 check_fabric_firmware_versions(dd);
14133
14134 thermal_init(dd);
14135
14136 ret = init_cntrs(dd);
14137 if (ret)
14138 goto bail_clear_intr;
14139
14140 ret = init_rcverr(dd);
14141 if (ret)
14142 goto bail_free_cntrs;
14143
14144 ret = eprom_init(dd);
14145 if (ret)
14146 goto bail_free_rcverr;
14147
14148 goto bail;
14149
14150bail_free_rcverr:
14151 free_rcverr(dd);
14152bail_free_cntrs:
14153 free_cntrs(dd);
14154bail_clear_intr:
14155 clean_up_interrupts(dd);
14156bail_cleanup:
14157 hfi1_pcie_ddcleanup(dd);
14158bail_free:
14159 hfi1_free_devdata(dd);
14160 dd = ERR_PTR(ret);
14161bail:
14162 return dd;
14163}
14164
14165static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14166 u32 dw_len)
14167{
14168 u32 delta_cycles;
14169 u32 current_egress_rate = ppd->current_egress_rate;
14170 /* rates here are in units of 10^6 bits/sec */
14171
14172 if (desired_egress_rate == -1)
14173 return 0; /* shouldn't happen */
14174
14175 if (desired_egress_rate >= current_egress_rate)
14176		return 0; /* we can't help it go faster, only slower */
14177
14178 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14179 egress_cycles(dw_len * 4, current_egress_rate);
14180
14181 return (u16)delta_cycles;
14182}
14183
14184
14185/**
14186 * create_pbc - build a pbc for transmission
14187 * @flags: special case flags or-ed in built pbc
14188 * @srate_mbs: static rate in mbits/sec
14189 * @vl: vl
14190 * @dw_len: dword length (header words + data words + pbc words)
14191 *
14192 * Create a PBC with the given flags, rate, VL, and length.
14193 *
14194 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14195 * for verbs, which does not use this PSM feature. The lone other caller
14196 * is for the diagnostic interface which calls this if the user does not
14197 * supply their own PBC.
14198 */
14199u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14200 u32 dw_len)
14201{
14202 u64 pbc, delay = 0;
14203
14204 if (unlikely(srate_mbs))
14205 delay = delay_cycles(ppd, srate_mbs, dw_len);
14206
14207 pbc = flags
14208 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14209 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14210 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14211 | (dw_len & PBC_LENGTH_DWS_MASK)
14212 << PBC_LENGTH_DWS_SHIFT;
14213
14214 return pbc;
14215}
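/*
 * Usage sketch (illustrative only, not a specific caller): a sender with
 * no static rate limiting builds a PBC as
 *
 *	dwords = hdr_dwords + data_dwords + 2;	(2 dwords for the PBC itself)
 *	pbc = create_pbc(ppd, 0, 0, vl, dwords);
 *
 * A non-zero srate_mbs only adds delay when the requested rate is below
 * the current egress rate; see delay_cycles() above.
 */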
14216
14217#define SBUS_THERMAL 0x4f
14218#define SBUS_THERM_MONITOR_MODE 0x1
14219
14220#define THERM_FAILURE(dev, ret, reason) \
14221 dd_dev_err((dd), \
14222 "Thermal sensor initialization failed: %s (%d)\n", \
14223 (reason), (ret))
14224
14225/*
14226 * Initialize the Avago Thermal sensor.
14227 *
14228 * After initialization, enable polling of thermal sensor through
14229 * SBus interface. In order for this to work, the SBus Master
14230 * firmware has to be loaded due to the fact that the HW polling
14231 * logic uses SBus interrupts, which are not supported with
14232 * default firmware. Otherwise, no data will be returned through
14233 * the ASIC_STS_THERM CSR.
14234 */
14235static int thermal_init(struct hfi1_devdata *dd)
14236{
14237 int ret = 0;
14238
14239 if (dd->icode != ICODE_RTL_SILICON ||
14240 !(dd->flags & HFI1_DO_INIT_ASIC))
14241 return ret;
14242
14243 acquire_hw_mutex(dd);
14244 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014245 /* Disable polling of thermal readings */
14246 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14247 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014248 /* Thermal Sensor Initialization */
14249 /* Step 1: Reset the Thermal SBus Receiver */
14250 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14251 RESET_SBUS_RECEIVER, 0);
14252 if (ret) {
14253 THERM_FAILURE(dd, ret, "Bus Reset");
14254 goto done;
14255 }
14256 /* Step 2: Set Reset bit in Thermal block */
14257 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14258 WRITE_SBUS_RECEIVER, 0x1);
14259 if (ret) {
14260 THERM_FAILURE(dd, ret, "Therm Block Reset");
14261 goto done;
14262 }
14263 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14264 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14265 WRITE_SBUS_RECEIVER, 0x32);
14266 if (ret) {
14267 THERM_FAILURE(dd, ret, "Write Clock Div");
14268 goto done;
14269 }
14270 /* Step 4: Select temperature mode */
14271 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14272 WRITE_SBUS_RECEIVER,
14273 SBUS_THERM_MONITOR_MODE);
14274 if (ret) {
14275 THERM_FAILURE(dd, ret, "Write Mode Sel");
14276 goto done;
14277 }
14278 /* Step 5: De-assert block reset and start conversion */
14279 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14280 WRITE_SBUS_RECEIVER, 0x2);
14281 if (ret) {
14282 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14283 goto done;
14284 }
14285 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14286 msleep(22);
14287
14288 /* Enable polling of thermal readings */
14289 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14290done:
14291 release_hw_mutex(dd);
14292 return ret;
14293}
14294
14295static void handle_temp_err(struct hfi1_devdata *dd)
14296{
14297 struct hfi1_pportdata *ppd = &dd->pport[0];
14298 /*
14299 * Thermal Critical Interrupt
14300 * Put the device into forced freeze mode, take link down to
14301 * offline, and put DC into reset.
14302 */
14303 dd_dev_emerg(dd,
14304 "Critical temperature reached! Forcing device into freeze mode!\n");
14305 dd->flags |= HFI1_FORCED_FREEZE;
14306 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14307 /*
14308 * Shut DC down as much and as quickly as possible.
14309 *
14310 * Step 1: Take the link down to OFFLINE. This will cause the
14311 * 8051 to put the Serdes in reset. However, we don't want to
14312 * go through the entire link state machine since we want to
14313 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14314 * but rather an attempt to save the chip.
14315 * Code below is almost the same as quiet_serdes() but avoids
14316 * all the extra work and the sleeps.
14317 */
14318 ppd->driver_link_ready = 0;
14319 ppd->link_enabled = 0;
14320 set_physical_link_state(dd, PLS_OFFLINE |
14321 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14322 /*
14323 * Step 2: Shutdown LCB and 8051
14324 * After shutdown, do not restore DC_CFG_RESET value.
14325 */
14326 dc_shutdown(dd);
14327}