/*
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
#define NUM_MAP_REGS		32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
/* QPN[7..1] */
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW		0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK		3ull
#define LRH_BTH_VALUE		2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW		0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK		128ull
#define LRH_SC_VALUE		0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET	((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET	((1ull << QW_SHIFT) | (1ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

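/* forward declarations of chip-specific helpers defined later in this file */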
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);

/*
 * Error interrupt table entry. This is used as input to the interrupt
1040 * "clear down" routine used for all second tier error interrupt register.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
		handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *,
		       void *context,
		       int vl,
		       int mode,
		       u64 data);
};

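/* per-receive-context RcvHdrQ overflow counters span contexts 0 through 159 */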
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

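/*
 * CNTR_ELEM builds a struct cntr_entry initializer; the wrapper macros
 * below fill in the CSR address (counter array base + 8 bytes per counter)
 * and the access routine appropriate to each counter group.
 */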
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx*0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

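/*
 * Basic CSR accessors: if the device is not marked present, reads return
 * all ones (-1) and writes are silently dropped.
 */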
1258u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1259{
1260 u64 val;
1261
1262 if (dd->flags & HFI1_PRESENT) {
1263 val = readq((void __iomem *)dd->kregbase + offset);
1264 return val;
1265 }
1266 return -1;
1267}
1268
1269void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1270{
1271 if (dd->flags & HFI1_PRESENT)
1272 writeq(value, (void __iomem *)dd->kregbase + offset);
1273}
1274
1275void __iomem *get_csr_addr(
1276 struct hfi1_devdata *dd,
1277 u32 offset)
1278{
1279 return (void __iomem *)dd->kregbase + offset;
1280}
1281
1282static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1283 int mode, u64 value)
1284{
1285 u64 ret;
1286
1287
1288 if (mode == CNTR_MODE_R) {
1289 ret = read_csr(dd, csr);
1290 } else if (mode == CNTR_MODE_W) {
1291 write_csr(dd, csr, value);
1292 ret = value;
1293 } else {
1294 dd_dev_err(dd, "Invalid cntr register access mode");
1295 return 0;
1296 }
1297
1298 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1299 return ret;
1300}
1301
1302/* Dev Access */
1303static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1304 void *context, int vl, int mode, u64 data)
1305{
1306 struct hfi1_devdata *dd = context;
1307 u64 csr = entry->csr;
1308
1309 if (entry->flags & CNTR_SDMA) {
1310 if (vl == CNTR_INVALID_VL)
1311 return 0;
1312 csr += 0x100 * vl;
1313 } else {
1314 if (vl != CNTR_INVALID_VL)
1315 return 0;
1316 }
1317 return read_write_csr(dd, csr, mode, data);
1318}
1319
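/*
 * Per-SDMA-engine software counters: idx selects the engine and the value
 * is read directly from the corresponding per_sdma[] entry.
 */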
1320static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1321 void *context, int idx, int mode, u64 data)
1322{
1323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1324
1325 if (dd->per_sdma && idx < dd->num_sdma)
1326 return dd->per_sdma[idx].err_cnt;
1327 return 0;
1328}
1329
1330static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1331 void *context, int idx, int mode, u64 data)
1332{
1333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1334
1335 if (dd->per_sdma && idx < dd->num_sdma)
1336 return dd->per_sdma[idx].sdma_int_cnt;
1337 return 0;
1338}
1339
1340static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1341 void *context, int idx, int mode, u64 data)
1342{
1343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1344
1345 if (dd->per_sdma && idx < dd->num_sdma)
1346 return dd->per_sdma[idx].idle_int_cnt;
1347 return 0;
1348}
1349
1350static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1351 void *context, int idx, int mode,
1352 u64 data)
1353{
1354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1355
1356 if (dd->per_sdma && idx < dd->num_sdma)
1357 return dd->per_sdma[idx].progress_int_cnt;
1358 return 0;
1359}
1360
1361static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1362 int vl, int mode, u64 data)
1363{
1364 struct hfi1_devdata *dd = context;
1365
1366 u64 val = 0;
1367 u64 csr = entry->csr;
1368
1369 if (entry->flags & CNTR_VL) {
1370 if (vl == CNTR_INVALID_VL)
1371 return 0;
1372 csr += 8 * vl;
1373 } else {
1374 if (vl != CNTR_INVALID_VL)
1375 return 0;
1376 }
1377
1378 val = read_write_csr(dd, csr, mode, data);
1379 return val;
1380}
1381
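/*
 * DC LCB counters cannot be touched with plain read_csr()/write_csr();
 * they go through read_lcb_csr()/write_lcb_csr(), which may fail if the
 * LCB cannot be acquired, in which case 0 is returned.
 */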
1382static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1383 int vl, int mode, u64 data)
1384{
1385 struct hfi1_devdata *dd = context;
1386 u32 csr = entry->csr;
1387 int ret = 0;
1388
1389 if (vl != CNTR_INVALID_VL)
1390 return 0;
1391 if (mode == CNTR_MODE_R)
1392 ret = read_lcb_csr(dd, csr, &data);
1393 else if (mode == CNTR_MODE_W)
1394 ret = write_lcb_csr(dd, csr, data);
1395
1396 if (ret) {
1397 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1398 return 0;
1399 }
1400
1401 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1402 return data;
1403}
1404
1405/* Port Access */
1406static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1407 int vl, int mode, u64 data)
1408{
1409 struct hfi1_pportdata *ppd = context;
1410
1411 if (vl != CNTR_INVALID_VL)
1412 return 0;
1413 return read_write_csr(ppd->dd, entry->csr, mode, data);
1414}
1415
1416static u64 port_access_u64_csr(const struct cntr_entry *entry,
1417 void *context, int vl, int mode, u64 data)
1418{
1419 struct hfi1_pportdata *ppd = context;
1420 u64 val;
1421 u64 csr = entry->csr;
1422
1423 if (entry->flags & CNTR_VL) {
1424 if (vl == CNTR_INVALID_VL)
1425 return 0;
1426 csr += 8 * vl;
1427 } else {
1428 if (vl != CNTR_INVALID_VL)
1429 return 0;
1430 }
1431 val = read_write_csr(ppd->dd, csr, mode, data);
1432 return val;
1433}
1434
1435/* Software defined */
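/*
 * Software-counter analogue of read_write_csr(): operates on a plain
 * in-memory u64 instead of a CSR, with the same mode semantics.
 */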
1436static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1437 u64 data)
1438{
1439 u64 ret;
1440
1441 if (mode == CNTR_MODE_R) {
1442 ret = *cntr;
1443 } else if (mode == CNTR_MODE_W) {
1444 *cntr = data;
1445 ret = data;
1446 } else {
1447 dd_dev_err(dd, "Invalid cntr sw access mode");
1448 return 0;
1449 }
1450
1451 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1452
1453 return ret;
1454}
1455
1456static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1457 int vl, int mode, u64 data)
1458{
1459 struct hfi1_pportdata *ppd = context;
1460
1461 if (vl != CNTR_INVALID_VL)
1462 return 0;
1463 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1464}
1465
1466static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1467 int vl, int mode, u64 data)
1468{
1469 struct hfi1_pportdata *ppd = context;
1470
1471 if (vl != CNTR_INVALID_VL)
1472 return 0;
1473 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1474}
1475
1476static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1477 void *context, int vl, int mode,
1478 u64 data)
1479{
1480 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1481
1482 if (vl != CNTR_INVALID_VL)
1483 return 0;
1484 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1485}
1486
1487static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1488 void *context, int vl, int mode, u64 data)
1489{
1490 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1491 u64 zero = 0;
1492 u64 *counter;
1493
1494 if (vl == CNTR_INVALID_VL)
1495 counter = &ppd->port_xmit_discards;
1496 else if (vl >= 0 && vl < C_VL_COUNT)
1497 counter = &ppd->port_xmit_discards_vl[vl];
1498 else
1499 counter = &zero;
1500
1501 return read_write_sw(ppd->dd, counter, mode, data);
1502}
1503
1504static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1505 void *context, int vl, int mode, u64 data)
1506{
1507 struct hfi1_pportdata *ppd = context;
1508
1509 if (vl != CNTR_INVALID_VL)
1510 return 0;
1511
1512 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1513 mode, data);
1514}
1515
1516static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1517 void *context, int vl, int mode, u64 data)
1518{
1519 struct hfi1_pportdata *ppd = context;
1520
1521 if (vl != CNTR_INVALID_VL)
1522 return 0;
1523
1524 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1525 mode, data);
1526}
1527
1528u64 get_all_cpu_total(u64 __percpu *cntr)
1529{
1530 int cpu;
1531 u64 counter = 0;
1532
1533 for_each_possible_cpu(cpu)
1534 counter += *per_cpu_ptr(cntr, cpu);
1535 return counter;
1536}
1537
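/*
 * Per-CPU software counters: reads report the sum over all possible CPUs
 * minus the zero-baseline *z_val; a write of 0 re-snapshots the baseline
 * (i.e. the counter can only be zeroed, never set to an arbitrary value).
 */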
1538static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1539 u64 __percpu *cntr,
1540 int vl, int mode, u64 data)
1541{
1542
1543 u64 ret = 0;
1544
1545 if (vl != CNTR_INVALID_VL)
1546 return 0;
1547
1548 if (mode == CNTR_MODE_R) {
1549 ret = get_all_cpu_total(cntr) - *z_val;
1550 } else if (mode == CNTR_MODE_W) {
1551 /* A write can only zero the counter */
1552 if (data == 0)
1553 *z_val = get_all_cpu_total(cntr);
1554 else
1555 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1556 } else {
1557 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1558 return 0;
1559 }
1560
1561 return ret;
1562}
1563
1564static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1565 void *context, int vl, int mode, u64 data)
1566{
1567 struct hfi1_devdata *dd = context;
1568
1569 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1570 mode, data);
1571}
1572
1573static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1574 void *context, int vl, int mode, u64 data)
1575{
1576 struct hfi1_devdata *dd = context;
1577
1578 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1579 mode, data);
1580}
1581
1582static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1583 void *context, int vl, int mode, u64 data)
1584{
1585 struct hfi1_devdata *dd = context;
1586
1587 return dd->verbs_dev.n_piowait;
1588}
1589
1590static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1591 void *context, int vl, int mode, u64 data)
1592{
1593 struct hfi1_devdata *dd = context;
1594
1595 return dd->verbs_dev.n_txwait;
1596}
1597
1598static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1599 void *context, int vl, int mode, u64 data)
1600{
1601 struct hfi1_devdata *dd = context;
1602
1603 return dd->verbs_dev.n_kmem_wait;
1604}
1605
1606static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1607 void *context, int vl, int mode, u64 data)
1608{
1609 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1610
1611 return dd->verbs_dev.n_send_schedule;
1612}
1613
1614/* Software counters for the error status bits within MISC_ERR_STATUS */
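/*
 * Each accessor below simply reports one element of
 * dd->misc_err_status_cnt[], indexed by the bit position of the
 * corresponding error in MISC_ERR_STATUS; the same pattern repeats for
 * the CceErrStatus, RcvErrStatus, SendPioErrStatus, SendDmaErrStatus and
 * SendEgressErrStatus groups further down.  The arrays themselves are
 * presumably incremented by the error-interrupt handlers elsewhere in
 * this file.
 */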
1615static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1616 void *context, int vl, int mode,
1617 u64 data)
1618{
1619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1620
1621 return dd->misc_err_status_cnt[12];
1622}
1623
1624static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1625 void *context, int vl, int mode,
1626 u64 data)
1627{
1628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1629
1630 return dd->misc_err_status_cnt[11];
1631}
1632
1633static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1634 void *context, int vl, int mode,
1635 u64 data)
1636{
1637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1638
1639 return dd->misc_err_status_cnt[10];
1640}
1641
1642static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1643 void *context, int vl,
1644 int mode, u64 data)
1645{
1646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1647
1648 return dd->misc_err_status_cnt[9];
1649}
1650
1651static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1652 void *context, int vl, int mode,
1653 u64 data)
1654{
1655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1656
1657 return dd->misc_err_status_cnt[8];
1658}
1659
1660static u64 access_misc_efuse_read_bad_addr_err_cnt(
1661 const struct cntr_entry *entry,
1662 void *context, int vl, int mode, u64 data)
1663{
1664 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1665
1666 return dd->misc_err_status_cnt[7];
1667}
1668
1669static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1670 void *context, int vl,
1671 int mode, u64 data)
1672{
1673 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1674
1675 return dd->misc_err_status_cnt[6];
1676}
1677
1678static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1679 void *context, int vl, int mode,
1680 u64 data)
1681{
1682 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1683
1684 return dd->misc_err_status_cnt[5];
1685}
1686
1687static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1688 void *context, int vl, int mode,
1689 u64 data)
1690{
1691 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1692
1693 return dd->misc_err_status_cnt[4];
1694}
1695
1696static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1697 void *context, int vl,
1698 int mode, u64 data)
1699{
1700 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1701
1702 return dd->misc_err_status_cnt[3];
1703}
1704
1705static u64 access_misc_csr_write_bad_addr_err_cnt(
1706 const struct cntr_entry *entry,
1707 void *context, int vl, int mode, u64 data)
1708{
1709 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1710
1711 return dd->misc_err_status_cnt[2];
1712}
1713
1714static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1715 void *context, int vl,
1716 int mode, u64 data)
1717{
1718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720 return dd->misc_err_status_cnt[1];
1721}
1722
1723static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1724 void *context, int vl, int mode,
1725 u64 data)
1726{
1727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729 return dd->misc_err_status_cnt[0];
1730}
1731
1732/*
1733 * Software counter for the aggregate of
1734 * individual CceErrStatus counters
1735 */
1736static u64 access_sw_cce_err_status_aggregated_cnt(
1737 const struct cntr_entry *entry,
1738 void *context, int vl, int mode, u64 data)
1739{
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742 return dd->sw_cce_err_status_aggregate;
1743}
1744
1745/*
1746 * Software counters corresponding to each of the
1747 * error status bits within CceErrStatus
1748 */
1749static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1750 void *context, int vl, int mode,
1751 u64 data)
1752{
1753 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755 return dd->cce_err_status_cnt[40];
1756}
1757
1758static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1759 void *context, int vl, int mode,
1760 u64 data)
1761{
1762 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1763
1764 return dd->cce_err_status_cnt[39];
1765}
1766
1767static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1768 void *context, int vl, int mode,
1769 u64 data)
1770{
1771 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1772
1773 return dd->cce_err_status_cnt[38];
1774}
1775
1776static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1777 void *context, int vl, int mode,
1778 u64 data)
1779{
1780 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1781
1782 return dd->cce_err_status_cnt[37];
1783}
1784
1785static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1786 void *context, int vl, int mode,
1787 u64 data)
1788{
1789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1790
1791 return dd->cce_err_status_cnt[36];
1792}
1793
1794static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1795 const struct cntr_entry *entry,
1796 void *context, int vl, int mode, u64 data)
1797{
1798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1799
1800 return dd->cce_err_status_cnt[35];
1801}
1802
1803static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1804 const struct cntr_entry *entry,
1805 void *context, int vl, int mode, u64 data)
1806{
1807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1808
1809 return dd->cce_err_status_cnt[34];
1810}
1811
1812static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1813 void *context, int vl,
1814 int mode, u64 data)
1815{
1816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1817
1818 return dd->cce_err_status_cnt[33];
1819}
1820
1821static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1822 void *context, int vl, int mode,
1823 u64 data)
1824{
1825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1826
1827 return dd->cce_err_status_cnt[32];
1828}
1829
1830static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1831 void *context, int vl, int mode, u64 data)
1832{
1833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1834
1835 return dd->cce_err_status_cnt[31];
1836}
1837
1838static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1839 void *context, int vl, int mode,
1840 u64 data)
1841{
1842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1843
1844 return dd->cce_err_status_cnt[30];
1845}
1846
1847static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1848 void *context, int vl, int mode,
1849 u64 data)
1850{
1851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1852
1853 return dd->cce_err_status_cnt[29];
1854}
1855
1856static u64 access_pcic_transmit_back_parity_err_cnt(
1857 const struct cntr_entry *entry,
1858 void *context, int vl, int mode, u64 data)
1859{
1860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1861
1862 return dd->cce_err_status_cnt[28];
1863}
1864
1865static u64 access_pcic_transmit_front_parity_err_cnt(
1866 const struct cntr_entry *entry,
1867 void *context, int vl, int mode, u64 data)
1868{
1869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1870
1871 return dd->cce_err_status_cnt[27];
1872}
1873
1874static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1875 void *context, int vl, int mode,
1876 u64 data)
1877{
1878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1879
1880 return dd->cce_err_status_cnt[26];
1881}
1882
1883static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1884 void *context, int vl, int mode,
1885 u64 data)
1886{
1887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1888
1889 return dd->cce_err_status_cnt[25];
1890}
1891
1892static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1893 void *context, int vl, int mode,
1894 u64 data)
1895{
1896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1897
1898 return dd->cce_err_status_cnt[24];
1899}
1900
1901static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1902 void *context, int vl, int mode,
1903 u64 data)
1904{
1905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1906
1907 return dd->cce_err_status_cnt[23];
1908}
1909
1910static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1911 void *context, int vl,
1912 int mode, u64 data)
1913{
1914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1915
1916 return dd->cce_err_status_cnt[22];
1917}
1918
1919static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1920 void *context, int vl, int mode,
1921 u64 data)
1922{
1923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1924
1925 return dd->cce_err_status_cnt[21];
1926}
1927
1928static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1929 const struct cntr_entry *entry,
1930 void *context, int vl, int mode, u64 data)
1931{
1932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934 return dd->cce_err_status_cnt[20];
1935}
1936
1937static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1938 void *context, int vl,
1939 int mode, u64 data)
1940{
1941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943 return dd->cce_err_status_cnt[19];
1944}
1945
1946static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1947 void *context, int vl, int mode,
1948 u64 data)
1949{
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952 return dd->cce_err_status_cnt[18];
1953}
1954
1955static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1956 void *context, int vl, int mode,
1957 u64 data)
1958{
1959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961 return dd->cce_err_status_cnt[17];
1962}
1963
1964static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1965 void *context, int vl, int mode,
1966 u64 data)
1967{
1968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970 return dd->cce_err_status_cnt[16];
1971}
1972
1973static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1974 void *context, int vl, int mode,
1975 u64 data)
1976{
1977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979 return dd->cce_err_status_cnt[15];
1980}
1981
1982static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1983 void *context, int vl,
1984 int mode, u64 data)
1985{
1986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988 return dd->cce_err_status_cnt[14];
1989}
1990
1991static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1992 void *context, int vl, int mode,
1993 u64 data)
1994{
1995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997 return dd->cce_err_status_cnt[13];
1998}
1999
2000static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2001 const struct cntr_entry *entry,
2002 void *context, int vl, int mode, u64 data)
2003{
2004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006 return dd->cce_err_status_cnt[12];
2007}
2008
2009static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2010 const struct cntr_entry *entry,
2011 void *context, int vl, int mode, u64 data)
2012{
2013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015 return dd->cce_err_status_cnt[11];
2016}
2017
2018static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2019 const struct cntr_entry *entry,
2020 void *context, int vl, int mode, u64 data)
2021{
2022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024 return dd->cce_err_status_cnt[10];
2025}
2026
2027static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2028 const struct cntr_entry *entry,
2029 void *context, int vl, int mode, u64 data)
2030{
2031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033 return dd->cce_err_status_cnt[9];
2034}
2035
2036static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2037 const struct cntr_entry *entry,
2038 void *context, int vl, int mode, u64 data)
2039{
2040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042 return dd->cce_err_status_cnt[8];
2043}
2044
2045static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2046 void *context, int vl,
2047 int mode, u64 data)
2048{
2049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051 return dd->cce_err_status_cnt[7];
2052}
2053
2054static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2055 const struct cntr_entry *entry,
2056 void *context, int vl, int mode, u64 data)
2057{
2058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060 return dd->cce_err_status_cnt[6];
2061}
2062
2063static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2064 void *context, int vl, int mode,
2065 u64 data)
2066{
2067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069 return dd->cce_err_status_cnt[5];
2070}
2071
2072static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2073 void *context, int vl, int mode,
2074 u64 data)
2075{
2076 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078 return dd->cce_err_status_cnt[4];
2079}
2080
2081static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2082 const struct cntr_entry *entry,
2083 void *context, int vl, int mode, u64 data)
2084{
2085 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087 return dd->cce_err_status_cnt[3];
2088}
2089
2090static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2091 void *context, int vl,
2092 int mode, u64 data)
2093{
2094 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096 return dd->cce_err_status_cnt[2];
2097}
2098
2099static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2100 void *context, int vl,
2101 int mode, u64 data)
2102{
2103 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105 return dd->cce_err_status_cnt[1];
2106}
2107
2108static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2109 void *context, int vl, int mode,
2110 u64 data)
2111{
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114 return dd->cce_err_status_cnt[0];
2115}
2116
2117/*
2118 * Software counters corresponding to each of the
2119 * error status bits within RcvErrStatus
2120 */
2121static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2122 void *context, int vl, int mode,
2123 u64 data)
2124{
2125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127 return dd->rcv_err_status_cnt[63];
2128}
2129
2130static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2131 void *context, int vl,
2132 int mode, u64 data)
2133{
2134 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2135
2136 return dd->rcv_err_status_cnt[62];
2137}
2138
2139static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2140 void *context, int vl, int mode,
2141 u64 data)
2142{
2143 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2144
2145 return dd->rcv_err_status_cnt[61];
2146}
2147
2148static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2149 void *context, int vl, int mode,
2150 u64 data)
2151{
2152 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2153
2154 return dd->rcv_err_status_cnt[60];
2155}
2156
2157static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2158 void *context, int vl,
2159 int mode, u64 data)
2160{
2161 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2162
2163 return dd->rcv_err_status_cnt[59];
2164}
2165
2166static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2167 void *context, int vl,
2168 int mode, u64 data)
2169{
2170 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2171
2172 return dd->rcv_err_status_cnt[58];
2173}
2174
2175static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2176 void *context, int vl, int mode,
2177 u64 data)
2178{
2179 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2180
2181 return dd->rcv_err_status_cnt[57];
2182}
2183
2184static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2185 void *context, int vl, int mode,
2186 u64 data)
2187{
2188 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2189
2190 return dd->rcv_err_status_cnt[56];
2191}
2192
2193static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2194 void *context, int vl, int mode,
2195 u64 data)
2196{
2197 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2198
2199 return dd->rcv_err_status_cnt[55];
2200}
2201
2202static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2203 const struct cntr_entry *entry,
2204 void *context, int vl, int mode, u64 data)
2205{
2206 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2207
2208 return dd->rcv_err_status_cnt[54];
2209}
2210
2211static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2212 const struct cntr_entry *entry,
2213 void *context, int vl, int mode, u64 data)
2214{
2215 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2216
2217 return dd->rcv_err_status_cnt[53];
2218}
2219
2220static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2221 void *context, int vl,
2222 int mode, u64 data)
2223{
2224 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226 return dd->rcv_err_status_cnt[52];
2227}
2228
2229static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2230 void *context, int vl,
2231 int mode, u64 data)
2232{
2233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235 return dd->rcv_err_status_cnt[51];
2236}
2237
2238static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2239 void *context, int vl,
2240 int mode, u64 data)
2241{
2242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244 return dd->rcv_err_status_cnt[50];
2245}
2246
2247static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2248 void *context, int vl,
2249 int mode, u64 data)
2250{
2251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253 return dd->rcv_err_status_cnt[49];
2254}
2255
2256static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2257 void *context, int vl,
2258 int mode, u64 data)
2259{
2260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262 return dd->rcv_err_status_cnt[48];
2263}
2264
2265static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2266 void *context, int vl,
2267 int mode, u64 data)
2268{
2269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271 return dd->rcv_err_status_cnt[47];
2272}
2273
2274static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2275 void *context, int vl, int mode,
2276 u64 data)
2277{
2278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280 return dd->rcv_err_status_cnt[46];
2281}
2282
2283static u64 access_rx_hq_intr_csr_parity_err_cnt(
2284 const struct cntr_entry *entry,
2285 void *context, int vl, int mode, u64 data)
2286{
2287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289 return dd->rcv_err_status_cnt[45];
2290}
2291
2292static u64 access_rx_lookup_csr_parity_err_cnt(
2293 const struct cntr_entry *entry,
2294 void *context, int vl, int mode, u64 data)
2295{
2296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298 return dd->rcv_err_status_cnt[44];
2299}
2300
2301static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2302 const struct cntr_entry *entry,
2303 void *context, int vl, int mode, u64 data)
2304{
2305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307 return dd->rcv_err_status_cnt[43];
2308}
2309
2310static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2311 const struct cntr_entry *entry,
2312 void *context, int vl, int mode, u64 data)
2313{
2314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316 return dd->rcv_err_status_cnt[42];
2317}
2318
2319static u64 access_rx_lookup_des_part2_parity_err_cnt(
2320 const struct cntr_entry *entry,
2321 void *context, int vl, int mode, u64 data)
2322{
2323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325 return dd->rcv_err_status_cnt[41];
2326}
2327
2328static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2329 const struct cntr_entry *entry,
2330 void *context, int vl, int mode, u64 data)
2331{
2332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334 return dd->rcv_err_status_cnt[40];
2335}
2336
2337static u64 access_rx_lookup_des_part1_unc_err_cnt(
2338 const struct cntr_entry *entry,
2339 void *context, int vl, int mode, u64 data)
2340{
2341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343 return dd->rcv_err_status_cnt[39];
2344}
2345
2346static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2347 const struct cntr_entry *entry,
2348 void *context, int vl, int mode, u64 data)
2349{
2350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352 return dd->rcv_err_status_cnt[38];
2353}
2354
2355static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2356 const struct cntr_entry *entry,
2357 void *context, int vl, int mode, u64 data)
2358{
2359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361 return dd->rcv_err_status_cnt[37];
2362}
2363
2364static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2365 const struct cntr_entry *entry,
2366 void *context, int vl, int mode, u64 data)
2367{
2368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370 return dd->rcv_err_status_cnt[36];
2371}
2372
2373static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2374 const struct cntr_entry *entry,
2375 void *context, int vl, int mode, u64 data)
2376{
2377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379 return dd->rcv_err_status_cnt[35];
2380}
2381
2382static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2383 const struct cntr_entry *entry,
2384 void *context, int vl, int mode, u64 data)
2385{
2386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388 return dd->rcv_err_status_cnt[34];
2389}
2390
2391static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2392 const struct cntr_entry *entry,
2393 void *context, int vl, int mode, u64 data)
2394{
2395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397 return dd->rcv_err_status_cnt[33];
2398}
2399
2400static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2401 void *context, int vl, int mode,
2402 u64 data)
2403{
2404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406 return dd->rcv_err_status_cnt[32];
2407}
2408
2409static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2410 void *context, int vl, int mode,
2411 u64 data)
2412{
2413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415 return dd->rcv_err_status_cnt[31];
2416}
2417
2418static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2419 void *context, int vl, int mode,
2420 u64 data)
2421{
2422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424 return dd->rcv_err_status_cnt[30];
2425}
2426
2427static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2428 void *context, int vl, int mode,
2429 u64 data)
2430{
2431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433 return dd->rcv_err_status_cnt[29];
2434}
2435
2436static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2437 void *context, int vl,
2438 int mode, u64 data)
2439{
2440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442 return dd->rcv_err_status_cnt[28];
2443}
2444
2445static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2446 const struct cntr_entry *entry,
2447 void *context, int vl, int mode, u64 data)
2448{
2449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451 return dd->rcv_err_status_cnt[27];
2452}
2453
2454static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2455 const struct cntr_entry *entry,
2456 void *context, int vl, int mode, u64 data)
2457{
2458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460 return dd->rcv_err_status_cnt[26];
2461}
2462
2463static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2464 const struct cntr_entry *entry,
2465 void *context, int vl, int mode, u64 data)
2466{
2467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469 return dd->rcv_err_status_cnt[25];
2470}
2471
2472static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2473 const struct cntr_entry *entry,
2474 void *context, int vl, int mode, u64 data)
2475{
2476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478 return dd->rcv_err_status_cnt[24];
2479}
2480
2481static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2482 const struct cntr_entry *entry,
2483 void *context, int vl, int mode, u64 data)
2484{
2485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487 return dd->rcv_err_status_cnt[23];
2488}
2489
2490static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2491 const struct cntr_entry *entry,
2492 void *context, int vl, int mode, u64 data)
2493{
2494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496 return dd->rcv_err_status_cnt[22];
2497}
2498
2499static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2500 const struct cntr_entry *entry,
2501 void *context, int vl, int mode, u64 data)
2502{
2503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505 return dd->rcv_err_status_cnt[21];
2506}
2507
2508static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2509 const struct cntr_entry *entry,
2510 void *context, int vl, int mode, u64 data)
2511{
2512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514 return dd->rcv_err_status_cnt[20];
2515}
2516
2517static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2518 const struct cntr_entry *entry,
2519 void *context, int vl, int mode, u64 data)
2520{
2521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523 return dd->rcv_err_status_cnt[19];
2524}
2525
2526static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2527 void *context, int vl,
2528 int mode, u64 data)
2529{
2530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532 return dd->rcv_err_status_cnt[18];
2533}
2534
2535static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2536 void *context, int vl,
2537 int mode, u64 data)
2538{
2539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541 return dd->rcv_err_status_cnt[17];
2542}
2543
2544static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2545 const struct cntr_entry *entry,
2546 void *context, int vl, int mode, u64 data)
2547{
2548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550 return dd->rcv_err_status_cnt[16];
2551}
2552
2553static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2554 const struct cntr_entry *entry,
2555 void *context, int vl, int mode, u64 data)
2556{
2557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559 return dd->rcv_err_status_cnt[15];
2560}
2561
2562static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2563 void *context, int vl,
2564 int mode, u64 data)
2565{
2566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568 return dd->rcv_err_status_cnt[14];
2569}
2570
2571static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2572 void *context, int vl,
2573 int mode, u64 data)
2574{
2575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577 return dd->rcv_err_status_cnt[13];
2578}
2579
2580static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2581 void *context, int vl, int mode,
2582 u64 data)
2583{
2584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586 return dd->rcv_err_status_cnt[12];
2587}
2588
2589static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2590 void *context, int vl, int mode,
2591 u64 data)
2592{
2593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595 return dd->rcv_err_status_cnt[11];
2596}
2597
2598static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2599 void *context, int vl, int mode,
2600 u64 data)
2601{
2602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604 return dd->rcv_err_status_cnt[10];
2605}
2606
2607static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2608 void *context, int vl, int mode,
2609 u64 data)
2610{
2611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613 return dd->rcv_err_status_cnt[9];
2614}
2615
2616static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2617 void *context, int vl, int mode,
2618 u64 data)
2619{
2620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622 return dd->rcv_err_status_cnt[8];
2623}
2624
2625static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2626 const struct cntr_entry *entry,
2627 void *context, int vl, int mode, u64 data)
2628{
2629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631 return dd->rcv_err_status_cnt[7];
2632}
2633
2634static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2635 const struct cntr_entry *entry,
2636 void *context, int vl, int mode, u64 data)
2637{
2638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640 return dd->rcv_err_status_cnt[6];
2641}
2642
2643static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2644 void *context, int vl, int mode,
2645 u64 data)
2646{
2647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649 return dd->rcv_err_status_cnt[5];
2650}
2651
2652static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2653 void *context, int vl, int mode,
2654 u64 data)
2655{
2656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658 return dd->rcv_err_status_cnt[4];
2659}
2660
2661static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2662 void *context, int vl, int mode,
2663 u64 data)
2664{
2665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667 return dd->rcv_err_status_cnt[3];
2668}
2669
2670static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2671 void *context, int vl, int mode,
2672 u64 data)
2673{
2674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676 return dd->rcv_err_status_cnt[2];
2677}
2678
2679static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2680 void *context, int vl, int mode,
2681 u64 data)
2682{
2683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685 return dd->rcv_err_status_cnt[1];
2686}
2687
2688static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2689 void *context, int vl, int mode,
2690 u64 data)
2691{
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694 return dd->rcv_err_status_cnt[0];
2695}
2696
2697/*
2698 * Software counters corresponding to each of the
2699 * error status bits within SendPioErrStatus
2700 */
2701static u64 access_pio_pec_sop_head_parity_err_cnt(
2702 const struct cntr_entry *entry,
2703 void *context, int vl, int mode, u64 data)
2704{
2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706
2707 return dd->send_pio_err_status_cnt[35];
2708}
2709
2710static u64 access_pio_pcc_sop_head_parity_err_cnt(
2711 const struct cntr_entry *entry,
2712 void *context, int vl, int mode, u64 data)
2713{
2714 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2715
2716 return dd->send_pio_err_status_cnt[34];
2717}
2718
2719static u64 access_pio_last_returned_cnt_parity_err_cnt(
2720 const struct cntr_entry *entry,
2721 void *context, int vl, int mode, u64 data)
2722{
2723 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2724
2725 return dd->send_pio_err_status_cnt[33];
2726}
2727
2728static u64 access_pio_current_free_cnt_parity_err_cnt(
2729 const struct cntr_entry *entry,
2730 void *context, int vl, int mode, u64 data)
2731{
2732 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2733
2734 return dd->send_pio_err_status_cnt[32];
2735}
2736
2737static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2738 void *context, int vl, int mode,
2739 u64 data)
2740{
2741 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2742
2743 return dd->send_pio_err_status_cnt[31];
2744}
2745
2746static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2747 void *context, int vl, int mode,
2748 u64 data)
2749{
2750 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2751
2752 return dd->send_pio_err_status_cnt[30];
2753}
2754
2755static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2756 void *context, int vl, int mode,
2757 u64 data)
2758{
2759 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2760
2761 return dd->send_pio_err_status_cnt[29];
2762}
2763
2764static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2765 const struct cntr_entry *entry,
2766 void *context, int vl, int mode, u64 data)
2767{
2768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2769
2770 return dd->send_pio_err_status_cnt[28];
2771}
2772
2773static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2774 void *context, int vl, int mode,
2775 u64 data)
2776{
2777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2778
2779 return dd->send_pio_err_status_cnt[27];
2780}
2781
2782static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2783 void *context, int vl, int mode,
2784 u64 data)
2785{
2786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2787
2788 return dd->send_pio_err_status_cnt[26];
2789}
2790
2791static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2792 void *context, int vl,
2793 int mode, u64 data)
2794{
2795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2796
2797 return dd->send_pio_err_status_cnt[25];
2798}
2799
2800static u64 access_pio_block_qw_count_parity_err_cnt(
2801 const struct cntr_entry *entry,
2802 void *context, int vl, int mode, u64 data)
2803{
2804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806 return dd->send_pio_err_status_cnt[24];
2807}
2808
2809static u64 access_pio_write_qw_valid_parity_err_cnt(
2810 const struct cntr_entry *entry,
2811 void *context, int vl, int mode, u64 data)
2812{
2813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815 return dd->send_pio_err_status_cnt[23];
2816}
2817
2818static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2819 void *context, int vl, int mode,
2820 u64 data)
2821{
2822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824 return dd->send_pio_err_status_cnt[22];
2825}
2826
2827static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2828 void *context, int vl,
2829 int mode, u64 data)
2830{
2831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833 return dd->send_pio_err_status_cnt[21];
2834}
2835
2836static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2837 void *context, int vl,
2838 int mode, u64 data)
2839{
2840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842 return dd->send_pio_err_status_cnt[20];
2843}
2844
2845static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2846 void *context, int vl,
2847 int mode, u64 data)
2848{
2849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851 return dd->send_pio_err_status_cnt[19];
2852}
2853
2854static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2855 const struct cntr_entry *entry,
2856 void *context, int vl, int mode, u64 data)
2857{
2858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860 return dd->send_pio_err_status_cnt[18];
2861}
2862
2863static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2864 void *context, int vl, int mode,
2865 u64 data)
2866{
2867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869 return dd->send_pio_err_status_cnt[17];
2870}
2871
2872static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2873 void *context, int vl, int mode,
2874 u64 data)
2875{
2876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878 return dd->send_pio_err_status_cnt[16];
2879}
2880
2881static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2882 const struct cntr_entry *entry,
2883 void *context, int vl, int mode, u64 data)
2884{
2885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887 return dd->send_pio_err_status_cnt[15];
2888}
2889
2890static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2891 const struct cntr_entry *entry,
2892 void *context, int vl, int mode, u64 data)
2893{
2894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896 return dd->send_pio_err_status_cnt[14];
2897}
2898
2899static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2900 const struct cntr_entry *entry,
2901 void *context, int vl, int mode, u64 data)
2902{
2903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905 return dd->send_pio_err_status_cnt[13];
2906}
2907
2908static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2909 const struct cntr_entry *entry,
2910 void *context, int vl, int mode, u64 data)
2911{
2912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914 return dd->send_pio_err_status_cnt[12];
2915}
2916
2917static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2918 const struct cntr_entry *entry,
2919 void *context, int vl, int mode, u64 data)
2920{
2921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923 return dd->send_pio_err_status_cnt[11];
2924}
2925
2926static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2927 const struct cntr_entry *entry,
2928 void *context, int vl, int mode, u64 data)
2929{
2930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932 return dd->send_pio_err_status_cnt[10];
2933}
2934
2935static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2936 const struct cntr_entry *entry,
2937 void *context, int vl, int mode, u64 data)
2938{
2939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941 return dd->send_pio_err_status_cnt[9];
2942}
2943
2944static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2945 const struct cntr_entry *entry,
2946 void *context, int vl, int mode, u64 data)
2947{
2948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950 return dd->send_pio_err_status_cnt[8];
2951}
2952
2953static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2954 const struct cntr_entry *entry,
2955 void *context, int vl, int mode, u64 data)
2956{
2957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959 return dd->send_pio_err_status_cnt[7];
2960}
2961
2962static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2963 void *context, int vl, int mode,
2964 u64 data)
2965{
2966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968 return dd->send_pio_err_status_cnt[6];
2969}
2970
2971static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2972 void *context, int vl, int mode,
2973 u64 data)
2974{
2975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977 return dd->send_pio_err_status_cnt[5];
2978}
2979
2980static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2981 void *context, int vl, int mode,
2982 u64 data)
2983{
2984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986 return dd->send_pio_err_status_cnt[4];
2987}
2988
2989static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2990 void *context, int vl, int mode,
2991 u64 data)
2992{
2993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995 return dd->send_pio_err_status_cnt[3];
2996}
2997
2998static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
2999 void *context, int vl, int mode,
3000 u64 data)
3001{
3002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004 return dd->send_pio_err_status_cnt[2];
3005}
3006
3007static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3008 void *context, int vl,
3009 int mode, u64 data)
3010{
3011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013 return dd->send_pio_err_status_cnt[1];
3014}
3015
3016static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3017 void *context, int vl, int mode,
3018 u64 data)
3019{
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022 return dd->send_pio_err_status_cnt[0];
3023}
3024
3025/*
3026 * Software counters corresponding to each of the
3027 * error status bits within SendDmaErrStatus
3028 */
3029static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3030 const struct cntr_entry *entry,
3031 void *context, int vl, int mode, u64 data)
3032{
3033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3034
3035 return dd->send_dma_err_status_cnt[3];
3036}
3037
3038static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3039 const struct cntr_entry *entry,
3040 void *context, int vl, int mode, u64 data)
3041{
3042 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3043
3044 return dd->send_dma_err_status_cnt[2];
3045}
3046
3047static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3048 void *context, int vl, int mode,
3049 u64 data)
3050{
3051 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3052
3053 return dd->send_dma_err_status_cnt[1];
3054}
3055
3056static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3057 void *context, int vl, int mode,
3058 u64 data)
3059{
3060 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3061
3062 return dd->send_dma_err_status_cnt[0];
3063}
3064
3065/*
3066 * Software counters corresponding to each of the
3067 * error status bits within SendEgressErrStatus
3068 */
3069static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3070 const struct cntr_entry *entry,
3071 void *context, int vl, int mode, u64 data)
3072{
3073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075 return dd->send_egress_err_status_cnt[63];
3076}
3077
3078static u64 access_tx_read_sdma_memory_csr_err_cnt(
3079 const struct cntr_entry *entry,
3080 void *context, int vl, int mode, u64 data)
3081{
3082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3083
3084 return dd->send_egress_err_status_cnt[62];
3085}
3086
3087static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3088 void *context, int vl, int mode,
3089 u64 data)
3090{
3091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3092
3093 return dd->send_egress_err_status_cnt[61];
3094}
3095
3096static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3097 void *context, int vl,
3098 int mode, u64 data)
3099{
3100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3101
3102 return dd->send_egress_err_status_cnt[60];
3103}
3104
3105static u64 access_tx_read_sdma_memory_cor_err_cnt(
3106 const struct cntr_entry *entry,
3107 void *context, int vl, int mode, u64 data)
3108{
3109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3110
3111 return dd->send_egress_err_status_cnt[59];
3112}
3113
3114static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3115 void *context, int vl, int mode,
3116 u64 data)
3117{
3118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3119
3120 return dd->send_egress_err_status_cnt[58];
3121}
3122
3123static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3124 void *context, int vl, int mode,
3125 u64 data)
3126{
3127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3128
3129 return dd->send_egress_err_status_cnt[57];
3130}
3131
3132static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3133 void *context, int vl, int mode,
3134 u64 data)
3135{
3136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3137
3138 return dd->send_egress_err_status_cnt[56];
3139}
3140
3141static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3142 void *context, int vl, int mode,
3143 u64 data)
3144{
3145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3146
3147 return dd->send_egress_err_status_cnt[55];
3148}
3149
3150static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3151 void *context, int vl, int mode,
3152 u64 data)
3153{
3154 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3155
3156 return dd->send_egress_err_status_cnt[54];
3157}
3158
3159static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3160 void *context, int vl, int mode,
3161 u64 data)
3162{
3163 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3164
3165 return dd->send_egress_err_status_cnt[53];
3166}
3167
3168static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3169 void *context, int vl, int mode,
3170 u64 data)
3171{
3172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174 return dd->send_egress_err_status_cnt[52];
3175}
3176
3177static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3178 void *context, int vl, int mode,
3179 u64 data)
3180{
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183 return dd->send_egress_err_status_cnt[51];
3184}
3185
3186static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3187 void *context, int vl, int mode,
3188 u64 data)
3189{
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192 return dd->send_egress_err_status_cnt[50];
3193}
3194
3195static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3196 void *context, int vl, int mode,
3197 u64 data)
3198{
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201 return dd->send_egress_err_status_cnt[49];
3202}
3203
3204static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3205 void *context, int vl, int mode,
3206 u64 data)
3207{
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210 return dd->send_egress_err_status_cnt[48];
3211}
3212
3213static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3214 void *context, int vl, int mode,
3215 u64 data)
3216{
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219 return dd->send_egress_err_status_cnt[47];
3220}
3221
3222static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3223 void *context, int vl, int mode,
3224 u64 data)
3225{
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228 return dd->send_egress_err_status_cnt[46];
3229}
3230
3231static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3232 void *context, int vl, int mode,
3233 u64 data)
3234{
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237 return dd->send_egress_err_status_cnt[45];
3238}
3239
3240static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3241 void *context, int vl,
3242 int mode, u64 data)
3243{
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246 return dd->send_egress_err_status_cnt[44];
3247}
3248
3249static u64 access_tx_read_sdma_memory_unc_err_cnt(
3250 const struct cntr_entry *entry,
3251 void *context, int vl, int mode, u64 data)
3252{
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255 return dd->send_egress_err_status_cnt[43];
3256}
3257
3258static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3259 void *context, int vl, int mode,
3260 u64 data)
3261{
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264 return dd->send_egress_err_status_cnt[42];
3265}
3266
3267static u64 access_tx_credit_return_parity_err_cnt(
3268 const struct cntr_entry *entry,
3269 void *context, int vl, int mode, u64 data)
3270{
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273 return dd->send_egress_err_status_cnt[41];
3274}
3275
3276static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3277 const struct cntr_entry *entry,
3278 void *context, int vl, int mode, u64 data)
3279{
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282 return dd->send_egress_err_status_cnt[40];
3283}
3284
3285static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3286 const struct cntr_entry *entry,
3287 void *context, int vl, int mode, u64 data)
3288{
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291 return dd->send_egress_err_status_cnt[39];
3292}
3293
3294static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3295 const struct cntr_entry *entry,
3296 void *context, int vl, int mode, u64 data)
3297{
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300 return dd->send_egress_err_status_cnt[38];
3301}
3302
3303static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3304 const struct cntr_entry *entry,
3305 void *context, int vl, int mode, u64 data)
3306{
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309 return dd->send_egress_err_status_cnt[37];
3310}
3311
3312static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3313 const struct cntr_entry *entry,
3314 void *context, int vl, int mode, u64 data)
3315{
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318 return dd->send_egress_err_status_cnt[36];
3319}
3320
3321static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3322 const struct cntr_entry *entry,
3323 void *context, int vl, int mode, u64 data)
3324{
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327 return dd->send_egress_err_status_cnt[35];
3328}
3329
3330static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3331 const struct cntr_entry *entry,
3332 void *context, int vl, int mode, u64 data)
3333{
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336 return dd->send_egress_err_status_cnt[34];
3337}
3338
3339static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3340 const struct cntr_entry *entry,
3341 void *context, int vl, int mode, u64 data)
3342{
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345 return dd->send_egress_err_status_cnt[33];
3346}
3347
3348static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3349 const struct cntr_entry *entry,
3350 void *context, int vl, int mode, u64 data)
3351{
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354 return dd->send_egress_err_status_cnt[32];
3355}
3356
3357static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3358 const struct cntr_entry *entry,
3359 void *context, int vl, int mode, u64 data)
3360{
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363 return dd->send_egress_err_status_cnt[31];
3364}
3365
3366static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3367 const struct cntr_entry *entry,
3368 void *context, int vl, int mode, u64 data)
3369{
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372 return dd->send_egress_err_status_cnt[30];
3373}
3374
3375static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3378{
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381 return dd->send_egress_err_status_cnt[29];
3382}
3383
3384static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3387{
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390 return dd->send_egress_err_status_cnt[28];
3391}
3392
3393static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3396{
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399 return dd->send_egress_err_status_cnt[27];
3400}
3401
3402static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3405{
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408 return dd->send_egress_err_status_cnt[26];
3409}
3410
3411static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3414{
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417 return dd->send_egress_err_status_cnt[25];
3418}
3419
3420static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3423{
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426 return dd->send_egress_err_status_cnt[24];
3427}
3428
3429static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3430 const struct cntr_entry *entry,
3431 void *context, int vl, int mode, u64 data)
3432{
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435 return dd->send_egress_err_status_cnt[23];
3436}
3437
3438static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3439 const struct cntr_entry *entry,
3440 void *context, int vl, int mode, u64 data)
3441{
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444 return dd->send_egress_err_status_cnt[22];
3445}
3446
3447static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3448 const struct cntr_entry *entry,
3449 void *context, int vl, int mode, u64 data)
3450{
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453 return dd->send_egress_err_status_cnt[21];
3454}
3455
3456static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3457 const struct cntr_entry *entry,
3458 void *context, int vl, int mode, u64 data)
3459{
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462 return dd->send_egress_err_status_cnt[20];
3463}
3464
3465static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3468{
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471 return dd->send_egress_err_status_cnt[19];
3472}
3473
3474static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3475 const struct cntr_entry *entry,
3476 void *context, int vl, int mode, u64 data)
3477{
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480 return dd->send_egress_err_status_cnt[18];
3481}
3482
3483static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3484 const struct cntr_entry *entry,
3485 void *context, int vl, int mode, u64 data)
3486{
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489 return dd->send_egress_err_status_cnt[17];
3490}
3491
3492static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3495{
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498 return dd->send_egress_err_status_cnt[16];
3499}
3500
3501static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3502 void *context, int vl, int mode,
3503 u64 data)
3504{
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507 return dd->send_egress_err_status_cnt[15];
3508}
3509
3510static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3511 void *context, int vl,
3512 int mode, u64 data)
3513{
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516 return dd->send_egress_err_status_cnt[14];
3517}
3518
3519static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3520 void *context, int vl, int mode,
3521 u64 data)
3522{
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525 return dd->send_egress_err_status_cnt[13];
3526}
3527
3528static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3529 void *context, int vl, int mode,
3530 u64 data)
3531{
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534 return dd->send_egress_err_status_cnt[12];
3535}
3536
3537static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3538 const struct cntr_entry *entry,
3539 void *context, int vl, int mode, u64 data)
3540{
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543 return dd->send_egress_err_status_cnt[11];
3544}
3545
3546static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3547 void *context, int vl, int mode,
3548 u64 data)
3549{
3550 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552 return dd->send_egress_err_status_cnt[10];
3553}
3554
3555static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3556 void *context, int vl, int mode,
3557 u64 data)
3558{
3559 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561 return dd->send_egress_err_status_cnt[9];
3562}
3563
3564static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3565 const struct cntr_entry *entry,
3566 void *context, int vl, int mode, u64 data)
3567{
3568 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570 return dd->send_egress_err_status_cnt[8];
3571}
3572
3573static u64 access_tx_pio_launch_intf_parity_err_cnt(
3574 const struct cntr_entry *entry,
3575 void *context, int vl, int mode, u64 data)
3576{
3577 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579 return dd->send_egress_err_status_cnt[7];
3580}
3581
3582static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3583 void *context, int vl, int mode,
3584 u64 data)
3585{
3586 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588 return dd->send_egress_err_status_cnt[6];
3589}
3590
3591static u64 access_tx_incorrect_link_state_err_cnt(
3592 const struct cntr_entry *entry,
3593 void *context, int vl, int mode, u64 data)
3594{
3595 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597 return dd->send_egress_err_status_cnt[5];
3598}
3599
3600static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3601 void *context, int vl, int mode,
3602 u64 data)
3603{
3604 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606 return dd->send_egress_err_status_cnt[4];
3607}
3608
3609static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3610 const struct cntr_entry *entry,
3611 void *context, int vl, int mode, u64 data)
3612{
3613 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615 return dd->send_egress_err_status_cnt[3];
3616}
3617
3618static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3619 void *context, int vl, int mode,
3620 u64 data)
3621{
3622 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624 return dd->send_egress_err_status_cnt[2];
3625}
3626
3627static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3628 const struct cntr_entry *entry,
3629 void *context, int vl, int mode, u64 data)
3630{
3631 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633 return dd->send_egress_err_status_cnt[1];
3634}
3635
3636static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3637 const struct cntr_entry *entry,
3638 void *context, int vl, int mode, u64 data)
3639{
3640 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642 return dd->send_egress_err_status_cnt[0];
3643}
3644
3645/*
3646 * Software counters corresponding to each of the
3647 * error status bits within SendErrStatus
3648 */
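/*
 * (Editor's note: in each of these groups the array index mirrors the bit
 * position of the error in the corresponding CSR, which is why reserved
 * bits also get placeholder accessors.)
 */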
3649static u64 access_send_csr_write_bad_addr_err_cnt(
3650 const struct cntr_entry *entry,
3651 void *context, int vl, int mode, u64 data)
3652{
3653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3654
3655 return dd->send_err_status_cnt[2];
3656}
3657
3658static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3659 void *context, int vl,
3660 int mode, u64 data)
3661{
3662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3663
3664 return dd->send_err_status_cnt[1];
3665}
3666
3667static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3668 void *context, int vl, int mode,
3669 u64 data)
3670{
3671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3672
3673 return dd->send_err_status_cnt[0];
3674}
3675
3676/*
3677 * Software counters corresponding to each of the
3678 * error status bits within SendCtxtErrStatus
3679 */
3680static u64 access_pio_write_out_of_bounds_err_cnt(
3681 const struct cntr_entry *entry,
3682 void *context, int vl, int mode, u64 data)
3683{
3684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685
3686 return dd->sw_ctxt_err_status_cnt[4];
3687}
3688
3689static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3690 void *context, int vl, int mode,
3691 u64 data)
3692{
3693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3694
3695 return dd->sw_ctxt_err_status_cnt[3];
3696}
3697
3698static u64 access_pio_write_crosses_boundary_err_cnt(
3699 const struct cntr_entry *entry,
3700 void *context, int vl, int mode, u64 data)
3701{
3702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3703
3704 return dd->sw_ctxt_err_status_cnt[2];
3705}
3706
3707static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3708 void *context, int vl,
3709 int mode, u64 data)
3710{
3711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3712
3713 return dd->sw_ctxt_err_status_cnt[1];
3714}
3715
3716static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3717 void *context, int vl, int mode,
3718 u64 data)
3719{
3720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3721
3722 return dd->sw_ctxt_err_status_cnt[0];
3723}
3724
3725/*
3726 * Software counters corresponding to each of the
3727 * error status bits within SendDmaEngErrStatus
3728 */
3729static u64 access_sdma_header_request_fifo_cor_err_cnt(
3730 const struct cntr_entry *entry,
3731 void *context, int vl, int mode, u64 data)
3732{
3733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734
3735 return dd->sw_send_dma_eng_err_status_cnt[23];
3736}
3737
3738static u64 access_sdma_header_storage_cor_err_cnt(
3739 const struct cntr_entry *entry,
3740 void *context, int vl, int mode, u64 data)
3741{
3742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3743
3744 return dd->sw_send_dma_eng_err_status_cnt[22];
3745}
3746
3747static u64 access_sdma_packet_tracking_cor_err_cnt(
3748 const struct cntr_entry *entry,
3749 void *context, int vl, int mode, u64 data)
3750{
3751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3752
3753 return dd->sw_send_dma_eng_err_status_cnt[21];
3754}
3755
3756static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3757 void *context, int vl, int mode,
3758 u64 data)
3759{
3760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3761
3762 return dd->sw_send_dma_eng_err_status_cnt[20];
3763}
3764
3765static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3766 void *context, int vl, int mode,
3767 u64 data)
3768{
3769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3770
3771 return dd->sw_send_dma_eng_err_status_cnt[19];
3772}
3773
3774static u64 access_sdma_header_request_fifo_unc_err_cnt(
3775 const struct cntr_entry *entry,
3776 void *context, int vl, int mode, u64 data)
3777{
3778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3779
3780 return dd->sw_send_dma_eng_err_status_cnt[18];
3781}
3782
3783static u64 access_sdma_header_storage_unc_err_cnt(
3784 const struct cntr_entry *entry,
3785 void *context, int vl, int mode, u64 data)
3786{
3787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3788
3789 return dd->sw_send_dma_eng_err_status_cnt[17];
3790}
3791
3792static u64 access_sdma_packet_tracking_unc_err_cnt(
3793 const struct cntr_entry *entry,
3794 void *context, int vl, int mode, u64 data)
3795{
3796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3797
3798 return dd->sw_send_dma_eng_err_status_cnt[16];
3799}
3800
3801static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3802 void *context, int vl, int mode,
3803 u64 data)
3804{
3805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3806
3807 return dd->sw_send_dma_eng_err_status_cnt[15];
3808}
3809
3810static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3811 void *context, int vl, int mode,
3812 u64 data)
3813{
3814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3815
3816 return dd->sw_send_dma_eng_err_status_cnt[14];
3817}
3818
3819static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3820 void *context, int vl, int mode,
3821 u64 data)
3822{
3823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3824
3825 return dd->sw_send_dma_eng_err_status_cnt[13];
3826}
3827
3828static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3829 void *context, int vl, int mode,
3830 u64 data)
3831{
3832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834 return dd->sw_send_dma_eng_err_status_cnt[12];
3835}
3836
3837static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3838 void *context, int vl, int mode,
3839 u64 data)
3840{
3841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843 return dd->sw_send_dma_eng_err_status_cnt[11];
3844}
3845
3846static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3847 void *context, int vl, int mode,
3848 u64 data)
3849{
3850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852 return dd->sw_send_dma_eng_err_status_cnt[10];
3853}
3854
3855static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3856 void *context, int vl, int mode,
3857 u64 data)
3858{
3859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861 return dd->sw_send_dma_eng_err_status_cnt[9];
3862}
3863
3864static u64 access_sdma_packet_desc_overflow_err_cnt(
3865 const struct cntr_entry *entry,
3866 void *context, int vl, int mode, u64 data)
3867{
3868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870 return dd->sw_send_dma_eng_err_status_cnt[8];
3871}
3872
3873static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3874 void *context, int vl,
3875 int mode, u64 data)
3876{
3877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879 return dd->sw_send_dma_eng_err_status_cnt[7];
3880}
3881
3882static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3883 void *context, int vl, int mode, u64 data)
3884{
3885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3886
3887 return dd->sw_send_dma_eng_err_status_cnt[6];
3888}
3889
3890static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3891 void *context, int vl, int mode,
3892 u64 data)
3893{
3894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3895
3896 return dd->sw_send_dma_eng_err_status_cnt[5];
3897}
3898
3899static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3900 void *context, int vl, int mode,
3901 u64 data)
3902{
3903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3904
3905 return dd->sw_send_dma_eng_err_status_cnt[4];
3906}
3907
3908static u64 access_sdma_tail_out_of_bounds_err_cnt(
3909 const struct cntr_entry *entry,
3910 void *context, int vl, int mode, u64 data)
3911{
3912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3913
3914 return dd->sw_send_dma_eng_err_status_cnt[3];
3915}
3916
3917static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3918 void *context, int vl, int mode,
3919 u64 data)
3920{
3921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3922
3923 return dd->sw_send_dma_eng_err_status_cnt[2];
3924}
3925
3926static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3927 void *context, int vl, int mode,
3928 u64 data)
3929{
3930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3931
3932 return dd->sw_send_dma_eng_err_status_cnt[1];
3933}
3934
3935static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3936 void *context, int vl, int mode,
3937 u64 data)
3938{
3939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3940
3941 return dd->sw_send_dma_eng_err_status_cnt[0];
3942}
3943
Mike Marciniszyn77241052015-07-30 15:17:43 -04003944#define def_access_sw_cpu(cntr) \
3945static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3946 void *context, int vl, int mode, u64 data) \
3947{ \
3948 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08003949 return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3950 ppd->ibport_data.rvp.cntr, vl, \
Mike Marciniszyn77241052015-07-30 15:17:43 -04003951 mode, data); \
3952}
3953
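/*
 * Editor's illustration (not part of the driver source): each
 * def_access_sw_cpu() line below expands to an accessor of the form shown
 * here for rc_acks, handing the z_rc_acks baseline and the rc_acks counter
 * kept in ppd->ibport_data.rvp to read_write_cpu():
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */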
3954def_access_sw_cpu(rc_acks);
3955def_access_sw_cpu(rc_qacks);
3956def_access_sw_cpu(rc_delayed_comp);
3957
3958#define def_access_ibp_counter(cntr) \
3959static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3960 void *context, int vl, int mode, u64 data) \
3961{ \
3962 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3963 \
3964 if (vl != CNTR_INVALID_VL) \
3965 return 0; \
3966 \
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08003967 return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
Mike Marciniszyn77241052015-07-30 15:17:43 -04003968 mode, data); \
3969}
3970
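/*
 * Editor's illustration (not part of the driver source):
 * def_access_ibp_counter(loop_pkts) below expands to the following; the
 * accessor returns 0 for any specific VL (these counters are not kept
 * per VL) and otherwise hands rvp.n_loop_pkts to read_write_sw():
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */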
3971def_access_ibp_counter(loop_pkts);
3972def_access_ibp_counter(rc_resends);
3973def_access_ibp_counter(rnr_naks);
3974def_access_ibp_counter(other_naks);
3975def_access_ibp_counter(rc_timeouts);
3976def_access_ibp_counter(pkt_drops);
3977def_access_ibp_counter(dmawait);
3978def_access_ibp_counter(rc_seqnak);
3979def_access_ibp_counter(rc_dupreq);
3980def_access_ibp_counter(rdma_seq);
3981def_access_ibp_counter(unaligned);
3982def_access_ibp_counter(seq_naks);
3983
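/*
 * Editor's note: dev_cntrs[] maps each device counter index (C_*) to a
 * cntr_entry.  The RXE32/CCE/DC/CNTR_ELEM helper macros bundle, roughly,
 * a display name, a CSR address (0 for software-only counters), CNTR_*
 * flags describing how the value is obtained (plain CSR read, synthesized,
 * 32-bit, per-VL, per-SDMA engine), and an access routine such as the
 * ones defined above.
 */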
3984static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3985[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3986[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3987 CNTR_NORMAL),
3988[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3989 CNTR_NORMAL),
3990[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3991 RCV_TID_FLOW_GEN_MISMATCH_CNT,
3992 CNTR_NORMAL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04003993[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3994 CNTR_NORMAL),
3995[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3996 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3997[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
3998 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
3999[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4000 CNTR_NORMAL),
4001[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4002 CNTR_NORMAL),
4003[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4004 CNTR_NORMAL),
4005[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4006 CNTR_NORMAL),
4007[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4008 CNTR_NORMAL),
4009[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4010 CNTR_NORMAL),
4011[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4012 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4013[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4014 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4015[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4016 CNTR_SYNTH),
4017[C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4018[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4019 CNTR_SYNTH),
4020[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4021 CNTR_SYNTH),
4022[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4023 CNTR_SYNTH),
4024[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4025 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4026[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4027 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4028 CNTR_SYNTH),
4029[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4030 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4031[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4032 CNTR_SYNTH),
4033[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4034 CNTR_SYNTH),
4035[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4036 CNTR_SYNTH),
4037[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4038 CNTR_SYNTH),
4039[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4040 CNTR_SYNTH),
4041[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4042 CNTR_SYNTH),
4043[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4044 CNTR_SYNTH),
4045[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4046 CNTR_SYNTH | CNTR_VL),
4047[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4048 CNTR_SYNTH | CNTR_VL),
4049[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4050[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4051 CNTR_SYNTH | CNTR_VL),
4052[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4053[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4054 CNTR_SYNTH | CNTR_VL),
4055[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4056 CNTR_SYNTH),
4057[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4058 CNTR_SYNTH | CNTR_VL),
4059[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4060 CNTR_SYNTH),
4061[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4062 CNTR_SYNTH | CNTR_VL),
4063[C_DC_TOTAL_CRC] =
4064 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4065 CNTR_SYNTH),
4066[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4067 CNTR_SYNTH),
4068[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4069 CNTR_SYNTH),
4070[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4071 CNTR_SYNTH),
4072[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4073 CNTR_SYNTH),
4074[C_DC_CRC_MULT_LN] =
4075 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4076 CNTR_SYNTH),
4077[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4078 CNTR_SYNTH),
4079[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4080 CNTR_SYNTH),
4081[C_DC_SEQ_CRC_CNT] =
4082 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4083 CNTR_SYNTH),
4084[C_DC_ESC0_ONLY_CNT] =
4085 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4086 CNTR_SYNTH),
4087[C_DC_ESC0_PLUS1_CNT] =
4088 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4089 CNTR_SYNTH),
4090[C_DC_ESC0_PLUS2_CNT] =
4091 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4092 CNTR_SYNTH),
4093[C_DC_REINIT_FROM_PEER_CNT] =
4094 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4095 CNTR_SYNTH),
4096[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4097 CNTR_SYNTH),
4098[C_DC_MISC_FLG_CNT] =
4099 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4100 CNTR_SYNTH),
4101[C_DC_PRF_GOOD_LTP_CNT] =
4102 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4103[C_DC_PRF_ACCEPTED_LTP_CNT] =
4104 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4105 CNTR_SYNTH),
4106[C_DC_PRF_RX_FLIT_CNT] =
4107 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4108[C_DC_PRF_TX_FLIT_CNT] =
4109 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4110[C_DC_PRF_CLK_CNTR] =
4111 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4112[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4113 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4114[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4115 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4116 CNTR_SYNTH),
4117[C_DC_PG_STS_TX_SBE_CNT] =
4118 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4119[C_DC_PG_STS_TX_MBE_CNT] =
4120 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4121 CNTR_SYNTH),
4122[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4123 access_sw_cpu_intr),
4124[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4125 access_sw_cpu_rcv_limit),
4126[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4127 access_sw_vtx_wait),
4128[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4129 access_sw_pio_wait),
4130[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4131 access_sw_kmem_wait),
Dean Luickb4219222015-10-26 10:28:35 -04004132[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4133 access_sw_send_schedule),
Vennila Megavannana699c6c2016-01-11 18:30:56 -05004134[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4135 SEND_DMA_DESC_FETCHED_CNT, 0,
4136 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4137 dev_access_u32_csr),
4138[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4139 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4140 access_sde_int_cnt),
4141[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4142 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4143 access_sde_err_cnt),
4144[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4145 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4146 access_sde_idle_int_cnt),
4147[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4148 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4149 access_sde_progress_int_cnt),
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05004150/* MISC_ERR_STATUS */
4151[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4152 CNTR_NORMAL,
4153 access_misc_pll_lock_fail_err_cnt),
4154[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4155 CNTR_NORMAL,
4156 access_misc_mbist_fail_err_cnt),
4157[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4158 CNTR_NORMAL,
4159 access_misc_invalid_eep_cmd_err_cnt),
4160[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4161 CNTR_NORMAL,
4162 access_misc_efuse_done_parity_err_cnt),
4163[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4164 CNTR_NORMAL,
4165 access_misc_efuse_write_err_cnt),
4166[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4167 0, CNTR_NORMAL,
4168 access_misc_efuse_read_bad_addr_err_cnt),
4169[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4170 CNTR_NORMAL,
4171 access_misc_efuse_csr_parity_err_cnt),
4172[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4173 CNTR_NORMAL,
4174 access_misc_fw_auth_failed_err_cnt),
4175[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4176 CNTR_NORMAL,
4177 access_misc_key_mismatch_err_cnt),
4178[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4179 CNTR_NORMAL,
4180 access_misc_sbus_write_failed_err_cnt),
4181[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4182 CNTR_NORMAL,
4183 access_misc_csr_write_bad_addr_err_cnt),
4184[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4185 CNTR_NORMAL,
4186 access_misc_csr_read_bad_addr_err_cnt),
4187[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4188 CNTR_NORMAL,
4189 access_misc_csr_parity_err_cnt),
4190/* CceErrStatus */
4191[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4192 CNTR_NORMAL,
4193 access_sw_cce_err_status_aggregated_cnt),
4194[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4195 CNTR_NORMAL,
4196 access_cce_msix_csr_parity_err_cnt),
4197[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4198 CNTR_NORMAL,
4199 access_cce_int_map_unc_err_cnt),
4200[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4201 CNTR_NORMAL,
4202 access_cce_int_map_cor_err_cnt),
4203[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4204 CNTR_NORMAL,
4205 access_cce_msix_table_unc_err_cnt),
4206[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4207 CNTR_NORMAL,
4208 access_cce_msix_table_cor_err_cnt),
4209[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4210 0, CNTR_NORMAL,
4211 access_cce_rxdma_conv_fifo_parity_err_cnt),
4212[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4213 0, CNTR_NORMAL,
4214 access_cce_rcpl_async_fifo_parity_err_cnt),
4215[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4216 CNTR_NORMAL,
4217 access_cce_seg_write_bad_addr_err_cnt),
4218[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4219 CNTR_NORMAL,
4220 access_cce_seg_read_bad_addr_err_cnt),
4221[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4222 CNTR_NORMAL,
4223 access_la_triggered_cnt),
4224[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4225 CNTR_NORMAL,
4226 access_cce_trgt_cpl_timeout_err_cnt),
4227[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4228 CNTR_NORMAL,
4229 access_pcic_receive_parity_err_cnt),
4230[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4231 CNTR_NORMAL,
4232 access_pcic_transmit_back_parity_err_cnt),
4233[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4234 0, CNTR_NORMAL,
4235 access_pcic_transmit_front_parity_err_cnt),
4236[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4237 CNTR_NORMAL,
4238 access_pcic_cpl_dat_q_unc_err_cnt),
4239[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4240 CNTR_NORMAL,
4241 access_pcic_cpl_hd_q_unc_err_cnt),
4242[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4243 CNTR_NORMAL,
4244 access_pcic_post_dat_q_unc_err_cnt),
4245[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4246 CNTR_NORMAL,
4247 access_pcic_post_hd_q_unc_err_cnt),
4248[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4249 CNTR_NORMAL,
4250 access_pcic_retry_sot_mem_unc_err_cnt),
4251[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4252 CNTR_NORMAL,
4253 access_pcic_retry_mem_unc_err),
4254[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4255 CNTR_NORMAL,
4256 access_pcic_n_post_dat_q_parity_err_cnt),
4257[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4258 CNTR_NORMAL,
4259 access_pcic_n_post_h_q_parity_err_cnt),
4260[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4261 CNTR_NORMAL,
4262 access_pcic_cpl_dat_q_cor_err_cnt),
4263[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4264 CNTR_NORMAL,
4265 access_pcic_cpl_hd_q_cor_err_cnt),
4266[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4267 CNTR_NORMAL,
4268 access_pcic_post_dat_q_cor_err_cnt),
4269[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4270 CNTR_NORMAL,
4271 access_pcic_post_hd_q_cor_err_cnt),
4272[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4273 CNTR_NORMAL,
4274 access_pcic_retry_sot_mem_cor_err_cnt),
4275[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4276 CNTR_NORMAL,
4277 access_pcic_retry_mem_cor_err_cnt),
4278[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4279 "CceCli1AsyncFifoDbgParityError", 0, 0,
4280 CNTR_NORMAL,
4281 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4282[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4283 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4284 CNTR_NORMAL,
4285 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4286 ),
4287[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4288 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4289 CNTR_NORMAL,
4290 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4291[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4292 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4293 CNTR_NORMAL,
4294 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4295[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4296 0, CNTR_NORMAL,
4297 access_cce_cli2_async_fifo_parity_err_cnt),
4298[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4299 CNTR_NORMAL,
4300 access_cce_csr_cfg_bus_parity_err_cnt),
4301[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4302 0, CNTR_NORMAL,
4303 access_cce_cli0_async_fifo_parity_err_cnt),
4304[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_cce_rspd_data_parity_err_cnt),
4307[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_cce_trgt_access_err_cnt),
4310[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4311 0, CNTR_NORMAL,
4312 access_cce_trgt_async_fifo_parity_err_cnt),
4313[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_cce_csr_write_bad_addr_err_cnt),
4316[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_cce_csr_read_bad_addr_err_cnt),
4319[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4320 CNTR_NORMAL,
4321 access_ccs_csr_parity_err_cnt),
4322
4323/* RcvErrStatus */
4324[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4325 CNTR_NORMAL,
4326 access_rx_csr_parity_err_cnt),
4327[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4328 CNTR_NORMAL,
4329 access_rx_csr_write_bad_addr_err_cnt),
4330[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_rx_csr_read_bad_addr_err_cnt),
4333[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4334 CNTR_NORMAL,
4335 access_rx_dma_csr_unc_err_cnt),
4336[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_rx_dma_dq_fsm_encoding_err_cnt),
4339[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4340 CNTR_NORMAL,
4341 access_rx_dma_eq_fsm_encoding_err_cnt),
4342[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_rx_dma_csr_parity_err_cnt),
4345[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_rx_rbuf_data_cor_err_cnt),
4348[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4349 CNTR_NORMAL,
4350 access_rx_rbuf_data_unc_err_cnt),
4351[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_rx_dma_data_fifo_rd_cor_err_cnt),
4354[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_rx_dma_data_fifo_rd_unc_err_cnt),
4357[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4360[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4361 CNTR_NORMAL,
4362 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4363[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_rx_rbuf_desc_part2_cor_err_cnt),
4366[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4367 CNTR_NORMAL,
4368 access_rx_rbuf_desc_part2_unc_err_cnt),
4369[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_rx_rbuf_desc_part1_cor_err_cnt),
4372[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4373 CNTR_NORMAL,
4374 access_rx_rbuf_desc_part1_unc_err_cnt),
4375[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_rx_hq_intr_fsm_err_cnt),
4378[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_rx_hq_intr_csr_parity_err_cnt),
4381[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4382 CNTR_NORMAL,
4383 access_rx_lookup_csr_parity_err_cnt),
4384[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_rx_lookup_rcv_array_cor_err_cnt),
4387[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_rx_lookup_rcv_array_unc_err_cnt),
4390[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4391 0, CNTR_NORMAL,
4392 access_rx_lookup_des_part2_parity_err_cnt),
4393[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4394 0, CNTR_NORMAL,
4395 access_rx_lookup_des_part1_unc_cor_err_cnt),
4396[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4397 CNTR_NORMAL,
4398 access_rx_lookup_des_part1_unc_err_cnt),
4399[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4400 CNTR_NORMAL,
4401 access_rx_rbuf_next_free_buf_cor_err_cnt),
4402[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4403 CNTR_NORMAL,
4404 access_rx_rbuf_next_free_buf_unc_err_cnt),
4405[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4406 "RxRbufFlInitWrAddrParityErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4409[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4410 0, CNTR_NORMAL,
4411 access_rx_rbuf_fl_initdone_parity_err_cnt),
4412[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4413 0, CNTR_NORMAL,
4414 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4415[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4418[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rx_rbuf_empty_err_cnt),
4421[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_rbuf_full_err_cnt),
4424[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4425 CNTR_NORMAL,
4426 access_rbuf_bad_lookup_err_cnt),
4427[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rbuf_ctx_id_parity_err_cnt),
4430[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4431 CNTR_NORMAL,
4432 access_rbuf_csr_qeopdw_parity_err_cnt),
4433[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4434 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4437[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4438 "RxRbufCsrQTlPtrParityErr", 0, 0,
4439 CNTR_NORMAL,
4440 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4441[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4442 0, CNTR_NORMAL,
4443 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4444[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4445 0, CNTR_NORMAL,
4446 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4447[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4448 0, 0, CNTR_NORMAL,
4449 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4450[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4451 0, CNTR_NORMAL,
4452 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4453[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4454 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4455 CNTR_NORMAL,
4456 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4457[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4458 0, CNTR_NORMAL,
4459 access_rx_rbuf_block_list_read_cor_err_cnt),
4460[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4461 0, CNTR_NORMAL,
4462 access_rx_rbuf_block_list_read_unc_err_cnt),
4463[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4464 CNTR_NORMAL,
4465 access_rx_rbuf_lookup_des_cor_err_cnt),
4466[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4467 CNTR_NORMAL,
4468 access_rx_rbuf_lookup_des_unc_err_cnt),
4469[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4470 "RxRbufLookupDesRegUncCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4473[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4476[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_rx_rbuf_free_list_cor_err_cnt),
4479[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4480 CNTR_NORMAL,
4481 access_rx_rbuf_free_list_unc_err_cnt),
4482[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4483 CNTR_NORMAL,
4484 access_rx_rcv_fsm_encoding_err_cnt),
4485[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4486 CNTR_NORMAL,
4487 access_rx_dma_flag_cor_err_cnt),
4488[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4489 CNTR_NORMAL,
4490 access_rx_dma_flag_unc_err_cnt),
4491[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4492 CNTR_NORMAL,
4493 access_rx_dc_sop_eop_parity_err_cnt),
4494[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_rcv_csr_parity_err_cnt),
4497[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4498 CNTR_NORMAL,
4499 access_rx_rcv_qp_map_table_cor_err_cnt),
4500[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4501 CNTR_NORMAL,
4502 access_rx_rcv_qp_map_table_unc_err_cnt),
4503[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_rcv_data_cor_err_cnt),
4506[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rcv_data_unc_err_cnt),
4509[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rcv_hdr_cor_err_cnt),
4512[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4513 CNTR_NORMAL,
4514 access_rx_rcv_hdr_unc_err_cnt),
4515[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4516 CNTR_NORMAL,
4517 access_rx_dc_intf_parity_err_cnt),
4518[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4519 CNTR_NORMAL,
4520 access_rx_dma_csr_cor_err_cnt),
4521/* SendPioErrStatus */
4522[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_pio_pec_sop_head_parity_err_cnt),
4525[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_pio_pcc_sop_head_parity_err_cnt),
4528[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4529 0, 0, CNTR_NORMAL,
4530 access_pio_last_returned_cnt_parity_err_cnt),
4531[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4532 0, CNTR_NORMAL,
4533 access_pio_current_free_cnt_parity_err_cnt),
4534[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4535 CNTR_NORMAL,
4536 access_pio_reserved_31_err_cnt),
4537[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4538 CNTR_NORMAL,
4539 access_pio_reserved_30_err_cnt),
4540[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_pio_ppmc_sop_len_err_cnt),
4543[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_pio_ppmc_bqc_mem_parity_err_cnt),
4546[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_pio_vl_fifo_parity_err_cnt),
4549[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_pio_vlf_sop_parity_err_cnt),
4552[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_pio_vlf_v1_len_parity_err_cnt),
4555[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_pio_block_qw_count_parity_err_cnt),
4558[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_pio_write_qw_valid_parity_err_cnt),
4561[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4562 CNTR_NORMAL,
4563 access_pio_state_machine_err_cnt),
4564[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4565 CNTR_NORMAL,
4566 access_pio_write_data_parity_err_cnt),
4567[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4568 CNTR_NORMAL,
4569 access_pio_host_addr_mem_cor_err_cnt),
4570[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4571 CNTR_NORMAL,
4572 access_pio_host_addr_mem_unc_err_cnt),
4573[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4574 CNTR_NORMAL,
4575 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4576[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4577 CNTR_NORMAL,
4578 access_pio_init_sm_in_err_cnt),
4579[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4580 CNTR_NORMAL,
4581 access_pio_ppmc_pbl_fifo_err_cnt),
4582[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4583 0, CNTR_NORMAL,
4584 access_pio_credit_ret_fifo_parity_err_cnt),
4585[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4586 CNTR_NORMAL,
4587 access_pio_v1_len_mem_bank1_cor_err_cnt),
4588[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4589 CNTR_NORMAL,
4590 access_pio_v1_len_mem_bank0_cor_err_cnt),
4591[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4592 CNTR_NORMAL,
4593 access_pio_v1_len_mem_bank1_unc_err_cnt),
4594[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4595 CNTR_NORMAL,
4596 access_pio_v1_len_mem_bank0_unc_err_cnt),
4597[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4598 CNTR_NORMAL,
4599 access_pio_sm_pkt_reset_parity_err_cnt),
4600[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4601 CNTR_NORMAL,
4602 access_pio_pkt_evict_fifo_parity_err_cnt),
4603[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4604 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4607[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_sbrdctl_crrel_parity_err_cnt),
4610[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_pio_pec_fifo_parity_err_cnt),
4613[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_pcc_fifo_parity_err_cnt),
4616[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_sb_mem_fifo1_err_cnt),
4619[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_sb_mem_fifo0_err_cnt),
4622[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4623 CNTR_NORMAL,
4624 access_pio_csr_parity_err_cnt),
4625[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_write_addr_parity_err_cnt),
4628[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_write_bad_ctxt_err_cnt),
4631/* SendDmaErrStatus */
4632[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4633 0, CNTR_NORMAL,
4634 access_sdma_pcie_req_tracking_cor_err_cnt),
4635[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4636 0, CNTR_NORMAL,
4637 access_sdma_pcie_req_tracking_unc_err_cnt),
4638[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_sdma_csr_parity_err_cnt),
4641[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_sdma_rpy_tag_err_cnt),
4644/* SendEgressErrStatus */
4645[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_tx_read_pio_memory_csr_unc_err_cnt),
4648[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4649 0, CNTR_NORMAL,
4650 access_tx_read_sdma_memory_csr_err_cnt),
4651[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4652 CNTR_NORMAL,
4653 access_tx_egress_fifo_cor_err_cnt),
4654[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4655 CNTR_NORMAL,
4656 access_tx_read_pio_memory_cor_err_cnt),
4657[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4658 CNTR_NORMAL,
4659 access_tx_read_sdma_memory_cor_err_cnt),
4660[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_tx_sb_hdr_cor_err_cnt),
4663[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_tx_credit_overrun_err_cnt),
4666[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_tx_launch_fifo8_cor_err_cnt),
4669[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4670 CNTR_NORMAL,
4671 access_tx_launch_fifo7_cor_err_cnt),
4672[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4673 CNTR_NORMAL,
4674 access_tx_launch_fifo6_cor_err_cnt),
4675[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_tx_launch_fifo5_cor_err_cnt),
4678[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_tx_launch_fifo4_cor_err_cnt),
4681[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_tx_launch_fifo3_cor_err_cnt),
4684[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_tx_launch_fifo2_cor_err_cnt),
4687[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4688 CNTR_NORMAL,
4689 access_tx_launch_fifo1_cor_err_cnt),
4690[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4691 CNTR_NORMAL,
4692 access_tx_launch_fifo0_cor_err_cnt),
4693[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_tx_credit_return_vl_err_cnt),
4696[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_tx_hcrc_insertion_err_cnt),
4699[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_tx_egress_fifo_unc_err_cnt),
4702[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4703 CNTR_NORMAL,
4704 access_tx_read_pio_memory_unc_err_cnt),
4705[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4706 CNTR_NORMAL,
4707 access_tx_read_sdma_memory_unc_err_cnt),
4708[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4709 CNTR_NORMAL,
4710 access_tx_sb_hdr_unc_err_cnt),
4711[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4712 CNTR_NORMAL,
4713 access_tx_credit_return_partiy_err_cnt),
4714[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4715 0, 0, CNTR_NORMAL,
4716 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4717[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4718 0, 0, CNTR_NORMAL,
4719 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4720[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4721 0, 0, CNTR_NORMAL,
4722 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4723[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4724 0, 0, CNTR_NORMAL,
4725 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4726[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4727 0, 0, CNTR_NORMAL,
4728 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4729[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4730 0, 0, CNTR_NORMAL,
4731 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4732[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4733 0, 0, CNTR_NORMAL,
4734 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4735[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4736 0, 0, CNTR_NORMAL,
4737 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4738[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4739 0, 0, CNTR_NORMAL,
4740 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4741[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4742 0, 0, CNTR_NORMAL,
4743 access_tx_sdma15_disallowed_packet_err_cnt),
4744[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4745 0, 0, CNTR_NORMAL,
4746 access_tx_sdma14_disallowed_packet_err_cnt),
4747[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4748 0, 0, CNTR_NORMAL,
4749 access_tx_sdma13_disallowed_packet_err_cnt),
4750[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4751 0, 0, CNTR_NORMAL,
4752 access_tx_sdma12_disallowed_packet_err_cnt),
4753[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4754 0, 0, CNTR_NORMAL,
4755 access_tx_sdma11_disallowed_packet_err_cnt),
4756[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4757 0, 0, CNTR_NORMAL,
4758 access_tx_sdma10_disallowed_packet_err_cnt),
4759[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4760 0, 0, CNTR_NORMAL,
4761 access_tx_sdma9_disallowed_packet_err_cnt),
4762[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4763 0, 0, CNTR_NORMAL,
4764 access_tx_sdma8_disallowed_packet_err_cnt),
4765[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4766 0, 0, CNTR_NORMAL,
4767 access_tx_sdma7_disallowed_packet_err_cnt),
4768[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4769 0, 0, CNTR_NORMAL,
4770 access_tx_sdma6_disallowed_packet_err_cnt),
4771[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4772 0, 0, CNTR_NORMAL,
4773 access_tx_sdma5_disallowed_packet_err_cnt),
4774[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4775 0, 0, CNTR_NORMAL,
4776 access_tx_sdma4_disallowed_packet_err_cnt),
4777[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4778 0, 0, CNTR_NORMAL,
4779 access_tx_sdma3_disallowed_packet_err_cnt),
4780[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4781 0, 0, CNTR_NORMAL,
4782 access_tx_sdma2_disallowed_packet_err_cnt),
4783[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4784 0, 0, CNTR_NORMAL,
4785 access_tx_sdma1_disallowed_packet_err_cnt),
4786[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4787 0, 0, CNTR_NORMAL,
4788 access_tx_sdma0_disallowed_packet_err_cnt),
4789[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4790 CNTR_NORMAL,
4791 access_tx_config_parity_err_cnt),
4792[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4793 CNTR_NORMAL,
4794 access_tx_sbrd_ctl_csr_parity_err_cnt),
4795[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4796 CNTR_NORMAL,
4797 access_tx_launch_csr_parity_err_cnt),
4798[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4799 CNTR_NORMAL,
4800 access_tx_illegal_vl_err_cnt),
4801[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4802 "TxSbrdCtlStateMachineParityErr", 0, 0,
4803 CNTR_NORMAL,
4804 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4805[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4806 CNTR_NORMAL,
4807 access_egress_reserved_10_err_cnt),
4808[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4809 CNTR_NORMAL,
4810 access_egress_reserved_9_err_cnt),
4811[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4812 0, 0, CNTR_NORMAL,
4813 access_tx_sdma_launch_intf_parity_err_cnt),
4814[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4815 CNTR_NORMAL,
4816 access_tx_pio_launch_intf_parity_err_cnt),
4817[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4818 CNTR_NORMAL,
4819 access_egress_reserved_6_err_cnt),
4820[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4821 CNTR_NORMAL,
4822 access_tx_incorrect_link_state_err_cnt),
4823[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4824 CNTR_NORMAL,
4825 access_tx_linkdown_err_cnt),
4826[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4827 "EgressFifoUnderrunOrParityErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4830[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4831 CNTR_NORMAL,
4832 access_egress_reserved_2_err_cnt),
4833[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4834 CNTR_NORMAL,
4835 access_tx_pkt_integrity_mem_unc_err_cnt),
4836[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4837 CNTR_NORMAL,
4838 access_tx_pkt_integrity_mem_cor_err_cnt),
4839/* SendErrStatus */
4840[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_send_csr_write_bad_addr_err_cnt),
4843[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4844 CNTR_NORMAL,
4845 access_send_csr_read_bad_addr_err_cnt),
4846[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4847 CNTR_NORMAL,
4848 access_send_csr_parity_cnt),
4849/* SendCtxtErrStatus */
4850[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4851 CNTR_NORMAL,
4852 access_pio_write_out_of_bounds_err_cnt),
4853[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4854 CNTR_NORMAL,
4855 access_pio_write_overflow_err_cnt),
4856[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4857 0, 0, CNTR_NORMAL,
4858 access_pio_write_crosses_boundary_err_cnt),
4859[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4860 CNTR_NORMAL,
4861 access_pio_disallowed_packet_err_cnt),
4862[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4863 CNTR_NORMAL,
4864 access_pio_inconsistent_sop_err_cnt),
4865/* SendDmaEngErrStatus */
4866[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4867 0, 0, CNTR_NORMAL,
4868 access_sdma_header_request_fifo_cor_err_cnt),
4869[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4870 CNTR_NORMAL,
4871 access_sdma_header_storage_cor_err_cnt),
4872[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4873 CNTR_NORMAL,
4874 access_sdma_packet_tracking_cor_err_cnt),
4875[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4876 CNTR_NORMAL,
4877 access_sdma_assembly_cor_err_cnt),
4878[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_sdma_desc_table_cor_err_cnt),
4881[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4882 0, 0, CNTR_NORMAL,
4883 access_sdma_header_request_fifo_unc_err_cnt),
4884[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_sdma_header_storage_unc_err_cnt),
4887[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4888 CNTR_NORMAL,
4889 access_sdma_packet_tracking_unc_err_cnt),
4890[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4891 CNTR_NORMAL,
4892 access_sdma_assembly_unc_err_cnt),
4893[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_sdma_desc_table_unc_err_cnt),
4896[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4897 CNTR_NORMAL,
4898 access_sdma_timeout_err_cnt),
4899[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_sdma_header_length_err_cnt),
4902[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_sdma_header_address_err_cnt),
4905[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4906 CNTR_NORMAL,
4907 access_sdma_header_select_err_cnt),
4908[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4909 CNTR_NORMAL,
4910 access_sdma_reserved_9_err_cnt),
4911[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_sdma_packet_desc_overflow_err_cnt),
4914[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_sdma_length_mismatch_err_cnt),
4917[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_sdma_halt_err_cnt),
4920[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4921 CNTR_NORMAL,
4922 access_sdma_mem_read_err_cnt),
4923[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4924 CNTR_NORMAL,
4925 access_sdma_first_desc_err_cnt),
4926[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4927 CNTR_NORMAL,
4928 access_sdma_tail_out_of_bounds_err_cnt),
4929[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4930 CNTR_NORMAL,
4931 access_sdma_too_long_err_cnt),
4932[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4933 CNTR_NORMAL,
4934 access_sdma_gen_mismatch_err_cnt),
4935[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4936 CNTR_NORMAL,
4937 access_sdma_wrong_dw_err_cnt),
4938};
4939
4940static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4941[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4942 CNTR_NORMAL),
4943[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4944 CNTR_NORMAL),
4945[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4946 CNTR_NORMAL),
4947[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4948 CNTR_NORMAL),
4949[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4950 CNTR_NORMAL),
4951[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4952 CNTR_NORMAL),
4953[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4954 CNTR_NORMAL),
4955[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4956[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4957[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4958[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4959 CNTR_SYNTH | CNTR_VL),
4960[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4961 CNTR_SYNTH | CNTR_VL),
4962[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4963 CNTR_SYNTH | CNTR_VL),
4964[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4965[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4966[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4967 access_sw_link_dn_cnt),
4968[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4969 access_sw_link_up_cnt),
4970[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4971 access_sw_unknown_frame_cnt),
4972[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4973 access_sw_xmit_discards),
4974[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4975 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4976 access_sw_xmit_discards),
4977[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4978 access_xmit_constraint_errs),
4979[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4980 access_rcv_constraint_errs),
4981[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4982[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4983[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4984[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4985[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4986[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4987[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4988[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4989[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4990[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4991[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4992[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4993[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4994 access_sw_cpu_rc_acks),
4995[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
4996 access_sw_cpu_rc_qacks),
4997[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
4998 access_sw_cpu_rc_delayed_comp),
4999[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5000[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5001[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5002[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5003[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5004[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5005[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5006[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5007[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5008[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5009[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5010[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5011[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5012[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5013[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5014[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5015[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5016[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5017[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5018[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5019[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5020[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5021[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5022[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5023[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5024[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5025[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5026[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5027[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5028[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5029[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5030[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5031[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5032[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5033[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5034[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5035[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5036[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5037[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5038[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5039[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5040[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5041[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5042[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5043[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5044[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5045[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5046[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5047[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5048[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5049[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5050[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5051[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5052[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5053[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5054[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5055[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5056[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5057[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5058[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5059[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5060[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5061[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5062[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5063[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5064[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5065[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5066[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5067[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5068[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5069[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5070[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5071[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5072[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5073[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5074[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5075[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5076[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5077[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5078[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5079};
5080
5081/* ======================================================================== */
5082
5083/* return true if this is chip revision a */
5084int is_ax(struct hfi1_devdata *dd)
5085{
5086 u8 chip_rev_minor =
5087 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5088 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5089 return (chip_rev_minor & 0xf0) == 0;
5090}
5091
5092/* return true if this is chip revision b */
5093int is_bx(struct hfi1_devdata *dd)
5094{
5095 u8 chip_rev_minor =
5096 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5097 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5098 return (chip_rev_minor & 0xF0) == 0x10;
5099}
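/*
 * Illustrative note (not in the original source): both checks above key
 * off the upper nibble of the minor chip revision. For example, a minor
 * revision of 0x01 identifies an A-step part (is_ax() returns true),
 * while 0x10 identifies a B-step part (is_bx() returns true).
 */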
5100
5101/*
5102 * Append string s to buffer buf. Arguments curp and len are the current
5103 * position and remaining length, respectively.
5104 *
5105 * return 0 on success, 1 on out of room
5106 */
5107static int append_str(char *buf, char **curp, int *lenp, const char *s)
5108{
5109 char *p = *curp;
5110 int len = *lenp;
5111 int result = 0; /* success */
5112 char c;
5113
5114 /* add a comma, if not the first string in the buffer */
5115 if (p != buf) {
5116 if (len == 0) {
5117 result = 1; /* out of room */
5118 goto done;
5119 }
5120 *p++ = ',';
5121 len--;
5122 }
5123
5124 /* copy the string */
5125 while ((c = *s++) != 0) {
5126 if (len == 0) {
5127 result = 1; /* out of room */
5128 goto done;
5129 }
5130 *p++ = c;
5131 len--;
5132 }
5133
5134done:
5135 /* write return values */
5136 *curp = p;
5137 *lenp = len;
5138
5139 return result;
5140}
5141
5142/*
5143 * Using the given flag table, print a comma separated string into
5144 * the buffer. End in '*' if the buffer is too short.
5145 */
5146static char *flag_string(char *buf, int buf_len, u64 flags,
5147 struct flag_table *table, int table_size)
5148{
5149 char extra[32];
5150 char *p = buf;
5151 int len = buf_len;
5152 int no_room = 0;
5153 int i;
5154
5155 /* make sure there are at least 2 bytes so we can form "*" */
5156 if (len < 2)
5157 return "";
5158
5159 len--; /* leave room for a nul */
5160 for (i = 0; i < table_size; i++) {
5161 if (flags & table[i].flag) {
5162 no_room = append_str(buf, &p, &len, table[i].str);
5163 if (no_room)
5164 break;
5165 flags &= ~table[i].flag;
5166 }
5167 }
5168
5169 /* any undocumented bits left? */
5170 if (!no_room && flags) {
5171 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5172 no_room = append_str(buf, &p, &len, extra);
5173 }
5174
5175 /* add * if ran out of room */
5176 if (no_room) {
5177 /* may need to back up to add space for a '*' */
5178 if (len == 0)
5179 --p;
5180 *p++ = '*';
5181 }
5182
5183 /* add final nul - space already allocated above */
5184 *p = 0;
5185 return buf;
5186}
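/*
 * Worked example (illustrative only): with a table containing
 * { 0x1, "ErrA" } and { 0x4, "ErrB" }, passing flags == 0x15 would
 * produce "ErrA,ErrB,bits 0x10" - the documented bits by name, the
 * leftover undocumented bit reported in hex, and a trailing '*' added
 * only if the buffer is too small to hold the full string.
 */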
5187
5188/* first 8 CCE error interrupt source names */
5189static const char * const cce_misc_names[] = {
5190 "CceErrInt", /* 0 */
5191 "RxeErrInt", /* 1 */
5192 "MiscErrInt", /* 2 */
5193 "Reserved3", /* 3 */
5194 "PioErrInt", /* 4 */
5195 "SDmaErrInt", /* 5 */
5196 "EgressErrInt", /* 6 */
5197 "TxeErrInt" /* 7 */
5198};
5199
5200/*
5201 * Return the miscellaneous error interrupt name.
5202 */
5203static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5204{
5205 if (source < ARRAY_SIZE(cce_misc_names))
5206 strncpy(buf, cce_misc_names[source], bsize);
5207 else
5208 snprintf(buf,
5209 bsize,
5210 "Reserved%u",
5211 source + IS_GENERAL_ERR_START);
5212
5213 return buf;
5214}
5215
5216/*
5217 * Return the SDMA engine error interrupt name.
5218 */
5219static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5220{
5221 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5222 return buf;
5223}
5224
5225/*
5226 * Return the send context error interrupt name.
5227 */
5228static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5229{
5230 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5231 return buf;
5232}
5233
5234static const char * const various_names[] = {
5235 "PbcInt",
5236 "GpioAssertInt",
5237 "Qsfp1Int",
5238 "Qsfp2Int",
5239 "TCritInt"
5240};
5241
5242/*
5243 * Return the various interrupt name.
5244 */
5245static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5246{
5247 if (source < ARRAY_SIZE(various_names))
5248 strncpy(buf, various_names[source], bsize);
5249 else
5250 snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
5251 return buf;
5252}
5253
5254/*
5255 * Return the DC interrupt name.
5256 */
5257static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5258{
5259 static const char * const dc_int_names[] = {
5260 "common",
5261 "lcb",
5262 "8051",
5263 "lbm" /* local block merge */
5264 };
5265
5266 if (source < ARRAY_SIZE(dc_int_names))
5267 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5268 else
5269 snprintf(buf, bsize, "DCInt%u", source);
5270 return buf;
5271}
5272
5273static const char * const sdma_int_names[] = {
5274 "SDmaInt",
5275 "SdmaIdleInt",
5276 "SdmaProgressInt",
5277};
5278
5279/*
5280 * Return the SDMA engine interrupt name.
5281 */
5282static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5283{
5284 /* what interrupt */
5285 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5286 /* which engine */
5287 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5288
5289 if (likely(what < 3))
5290 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5291 else
5292 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5293 return buf;
5294}
5295
5296/*
5297 * Return the receive available interrupt name.
5298 */
5299static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5300{
5301 snprintf(buf, bsize, "RcvAvailInt%u", source);
5302 return buf;
5303}
5304
5305/*
5306 * Return the receive urgent interrupt name.
5307 */
5308static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5309{
5310 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5311 return buf;
5312}
5313
5314/*
5315 * Return the send credit interrupt name.
5316 */
5317static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5318{
5319 snprintf(buf, bsize, "SendCreditInt%u", source);
5320 return buf;
5321}
5322
5323/*
5324 * Return the reserved interrupt name.
5325 */
5326static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5327{
5328 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5329 return buf;
5330}
5331
5332static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5333{
5334 return flag_string(buf, buf_len, flags,
5335 cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
5336}
5337
5338static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5339{
5340 return flag_string(buf, buf_len, flags,
5341 rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
5342}
5343
5344static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5345{
5346 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5347 ARRAY_SIZE(misc_err_status_flags));
5348}
5349
5350static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5351{
5352 return flag_string(buf, buf_len, flags,
5353 pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
5354}
5355
5356static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5357{
5358 return flag_string(buf, buf_len, flags,
5359 sdma_err_status_flags,
5360 ARRAY_SIZE(sdma_err_status_flags));
5361}
5362
5363static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5364{
5365 return flag_string(buf, buf_len, flags,
5366 egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
5367}
5368
5369static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5370{
5371 return flag_string(buf, buf_len, flags,
5372 egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
5373}
5374
5375static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5376{
5377 return flag_string(buf, buf_len, flags,
5378 send_err_status_flags,
5379 ARRAY_SIZE(send_err_status_flags));
5380}
5381
5382static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5383{
5384 char buf[96];
5385 int i = 0;
5386
5387 /*
5388 * For most of these errors, there is nothing that can be done except
5389 * report or record it.
5390 */
5391 dd_dev_info(dd, "CCE Error: %s\n",
5392 cce_err_status_string(buf, sizeof(buf), reg));
5393
5394 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5395 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5396 /* this error requires a manual drop into SPC freeze mode */
5397 /* then a fix up */
5398 start_freeze_handling(dd->pport, FREEZE_SELF);
5399 }
5400
5401 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5402 if (reg & (1ull << i)) {
5403 incr_cntr64(&dd->cce_err_status_cnt[i]);
5404 /* maintain a counter over all cce_err_status errors */
5405 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5406 }
5407 }
5408}
5409
5410/*
5411 * Check counters for receive errors that do not have an interrupt
5412 * associated with them.
5413 */
5414#define RCVERR_CHECK_TIME 10
5415static void update_rcverr_timer(unsigned long opaque)
5416{
5417 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5418 struct hfi1_pportdata *ppd = dd->pport;
5419 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5420
5421 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5422 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5423 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5424 set_link_down_reason(ppd,
5425 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5426 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5427 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5428 }
5429 dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
5430
5431 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5432}
5433
5434static int init_rcverr(struct hfi1_devdata *dd)
5435{
5436 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5437 /* Assume the hardware counter has been reset */
5438 dd->rcv_ovfl_cnt = 0;
5439 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5440}
5441
5442static void free_rcverr(struct hfi1_devdata *dd)
5443{
5444 if (dd->rcverr_timer.data)
5445 del_timer_sync(&dd->rcverr_timer);
5446 dd->rcverr_timer.data = 0;
5447}
5448
5449static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5450{
5451 char buf[96];
5452 int i = 0;
5453
5454 dd_dev_info(dd, "Receive Error: %s\n",
5455 rxe_err_status_string(buf, sizeof(buf), reg));
5456
5457 if (reg & ALL_RXE_FREEZE_ERR) {
5458 int flags = 0;
5459
5460 /*
5461 * Freeze mode recovery is disabled for the errors
5462 * in RXE_FREEZE_ABORT_MASK
5463 */
5464 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5465 flags = FREEZE_ABORT;
5466
5467 start_freeze_handling(dd->pport, flags);
5468 }
5469
5470 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5471 if (reg & (1ull << i))
5472 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5473 }
5474}
5475
5476static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5477{
5478 char buf[96];
5479 int i = 0;
5480
5481 dd_dev_info(dd, "Misc Error: %s",
5482 misc_err_status_string(buf, sizeof(buf), reg));
5483 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5484 if (reg & (1ull << i))
5485 incr_cntr64(&dd->misc_err_status_cnt[i]);
5486 }
5487}
5488
5489static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5490{
5491 char buf[96];
5492 int i = 0;
5493
5494 dd_dev_info(dd, "PIO Error: %s\n",
5495 pio_err_status_string(buf, sizeof(buf), reg));
5496
5497 if (reg & ALL_PIO_FREEZE_ERR)
5498 start_freeze_handling(dd->pport, 0);
5499
5500 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5501 if (reg & (1ull << i))
5502 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5503 }
5504}
5505
5506static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5507{
5508 char buf[96];
5509 int i = 0;
5510
5511 dd_dev_info(dd, "SDMA Error: %s\n",
5512 sdma_err_status_string(buf, sizeof(buf), reg));
5513
5514 if (reg & ALL_SDMA_FREEZE_ERR)
5515 start_freeze_handling(dd->pport, 0);
5516
5517 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5518 if (reg & (1ull << i))
5519 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5520 }
5521}
5522
5523static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5524{
5525 incr_cntr64(&ppd->port_xmit_discards);
5526}
5527
5528static void count_port_inactive(struct hfi1_devdata *dd)
5529{
5530 __count_port_discards(dd->pport);
5531}
5532
5533/*
5534 * We have had a "disallowed packet" error during egress. Determine the
5535 * integrity check which failed, and update relevant error counter, etc.
5536 *
5537 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5538 * bit of state per integrity check, and so we can miss the reason for an
5539 * egress error if more than one packet fails the same integrity check
5540 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5541 */
5542static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5543 int vl)
5544{
5545 struct hfi1_pportdata *ppd = dd->pport;
5546 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5547 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5548 char buf[96];
5549
5550 /* clear down all observed info as quickly as possible after read */
5551 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5552
5553 dd_dev_info(dd,
5554 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5555 info, egress_err_info_string(buf, sizeof(buf), info), src);
5556
5557 /* Eventually add other counters for each bit */
5558 if (info & PORT_DISCARD_EGRESS_ERRS) {
5559 int weight, i;
5560
5561 /*
5562 * Count all, in case multiple bits are set. Reminder:
5563 * since there is only one info register for many sources,
5564 * these may be attributed to the wrong VL if they occur
5565 * too close together.
5566 */
5567 weight = hweight64(info);
5568 for (i = 0; i < weight; i++) {
5569 __count_port_discards(ppd);
5570 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5571 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5572 else if (vl == 15)
5573 incr_cntr64(&ppd->port_xmit_discards_vl
5574 [C_VL_15]);
5575 }
5576 }
5577}
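/*
 * Illustrative scenario (assumption, not stated in this file): if two
 * disallowed-packet errors from different engines arrive before the
 * handler runs, SEND_EGRESS_ERR_INFO holds both bits but only one error
 * source, so both discards above are charged to the single VL passed in.
 * This is the mis-attribution the comment inside the loop warns about.
 */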
5578
5579/*
5580 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5581 * register. Does it represent a 'port inactive' error?
5582 */
5583static inline int port_inactive_err(u64 posn)
5584{
5585 return (posn >= SEES(TX_LINKDOWN) &&
5586 posn <= SEES(TX_INCORRECT_LINK_STATE));
5587}
5588
5589/*
5590 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5591 * register. Does it represent a 'disallowed packet' error?
5592 */
5593static inline int disallowed_pkt_err(int posn)
5594{
5595 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5596 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5597}
5598
5599/*
5600 * Input value is a bit position of one of the SDMA engine disallowed
5601 * packet errors. Return which engine. Use of this must be guarded by
5602 * disallowed_pkt_err().
5603 */
5604static inline int disallowed_pkt_engine(int posn)
5605{
5606 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5607}
5608
5609/*
5610 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5611 * be done.
5612 */
5613static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5614{
5615 struct sdma_vl_map *m;
5616 int vl;
5617
5618 /* range check */
5619 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5620 return -1;
5621
5622 rcu_read_lock();
5623 m = rcu_dereference(dd->sdma_map);
5624 vl = m->engine_to_vl[engine];
5625 rcu_read_unlock();
5626
5627 return vl;
5628}
5629
5630/*
5631 * Translate the send context (software index) into a VL. Return -1 if the
5632 * translation cannot be done.
5633 */
5634static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5635{
5636 struct send_context_info *sci;
5637 struct send_context *sc;
5638 int i;
5639
5640 sci = &dd->send_contexts[sw_index];
5641
5642 /* there is no information for user (PSM) and ack contexts */
5643 if (sci->type != SC_KERNEL)
5644 return -1;
5645
5646 sc = sci->sc;
5647 if (!sc)
5648 return -1;
5649 if (dd->vld[15].sc == sc)
5650 return 15;
5651 for (i = 0; i < num_vls; i++)
5652 if (dd->vld[i].sc == sc)
5653 return i;
5654
5655 return -1;
5656}
5657
5658static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5659{
5660 u64 reg_copy = reg, handled = 0;
5661 char buf[96];
5662 int i = 0;
5663
5664 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5665 start_freeze_handling(dd->pport, 0);
5666 else if (is_ax(dd) &&
5667 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5668 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5669 start_freeze_handling(dd->pport, 0);
5670
5671 while (reg_copy) {
5672 int posn = fls64(reg_copy);
5673 /* fls64() returns a 1-based offset, we want it zero based */
5674 int shift = posn - 1;
5675 u64 mask = 1ULL << shift;
5676
5677 if (port_inactive_err(shift)) {
5678 count_port_inactive(dd);
5679 handled |= mask;
5680 } else if (disallowed_pkt_err(shift)) {
5681 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5682
5683 handle_send_egress_err_info(dd, vl);
5684 handled |= mask;
5685 }
5686 reg_copy &= ~mask;
5687 }
5688
5689 reg &= ~handled;
5690
5691 if (reg)
5692 dd_dev_info(dd, "Egress Error: %s\n",
5693 egress_err_status_string(buf, sizeof(buf), reg));
5694
5695 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5696 if (reg & (1ull << i))
5697 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5698 }
5699}
5700
5701static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5702{
5703 char buf[96];
5704 int i = 0;
5705
5706 dd_dev_info(dd, "Send Error: %s\n",
5707 send_err_status_string(buf, sizeof(buf), reg));
5708
5709 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5710 if (reg & (1ull << i))
5711 incr_cntr64(&dd->send_err_status_cnt[i]);
5712 }
5713}
5714
5715/*
5716 * The maximum number of times the error clear down will loop before
5717 * blocking a repeating error. This value is arbitrary.
5718 */
5719#define MAX_CLEAR_COUNT 20
5720
5721/*
5722 * Clear and handle an error register. All error interrupts are funneled
5723 * through here to have a central location to correctly handle single-
5724 * or multi-shot errors.
5725 *
5726 * For non per-context registers, call this routine with a context value
5727 * of 0 so the per-context offset is zero.
5728 *
5729 * If the handler loops too many times, assume that something is wrong
5730 * and can't be fixed, so mask the error bits.
5731 */
5732static void interrupt_clear_down(struct hfi1_devdata *dd,
5733 u32 context,
5734 const struct err_reg_info *eri)
5735{
5736 u64 reg;
5737 u32 count;
5738
5739 /* read in a loop until no more errors are seen */
5740 count = 0;
5741 while (1) {
5742 reg = read_kctxt_csr(dd, context, eri->status);
5743 if (reg == 0)
5744 break;
5745 write_kctxt_csr(dd, context, eri->clear, reg);
5746 if (likely(eri->handler))
5747 eri->handler(dd, context, reg);
5748 count++;
5749 if (count > MAX_CLEAR_COUNT) {
5750 u64 mask;
5751
5752 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5753 eri->desc, reg);
5754 /*
5755 * Read-modify-write so any other masked bits
5756 * remain masked.
5757 */
5758 mask = read_kctxt_csr(dd, context, eri->mask);
5759 mask &= ~reg;
5760 write_kctxt_csr(dd, context, eri->mask, mask);
5761 break;
5762 }
5763 }
5764}
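/*
 * Illustrative behavior (not in the original source): a stuck error bit
 * that re-asserts on every pass is read, cleared, and handled up to
 * MAX_CLEAR_COUNT (20) times; after that it is cleared in the mask CSR
 * via read-modify-write so the interrupt stops firing, and the
 * "Repeating ... - masking" message above is logged once.
 */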
5765
5766/*
5767 * CCE block "misc" interrupt. Source is < 16.
5768 */
5769static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5770{
5771 const struct err_reg_info *eri = &misc_errs[source];
5772
5773 if (eri->handler) {
5774 interrupt_clear_down(dd, 0, eri);
5775 } else {
5776 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5777 source);
5778 }
5779}
5780
5781static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5782{
5783 return flag_string(buf, buf_len, flags,
5784 sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
5785}
5786
5787/*
5788 * Send context error interrupt. Source (hw_context) is < 160.
5789 *
5790 * All send context errors cause the send context to halt. The normal
5791 * clear-down mechanism cannot be used because we cannot clear the
5792 * error bits until several other long-running items are done first.
5793 * This is OK because with the context halted, nothing else is going
5794 * to happen on it anyway.
5795 */
5796static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5797 unsigned int hw_context)
5798{
5799 struct send_context_info *sci;
5800 struct send_context *sc;
5801 char flags[96];
5802 u64 status;
5803 u32 sw_index;
5804 int i = 0;
5805
5806 sw_index = dd->hw_to_sw[hw_context];
5807 if (sw_index >= dd->num_send_contexts) {
5808 dd_dev_err(dd,
5809 "out of range sw index %u for send context %u\n",
5810 sw_index, hw_context);
5811 return;
5812 }
5813 sci = &dd->send_contexts[sw_index];
5814 sc = sci->sc;
5815 if (!sc) {
5816 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5817 sw_index, hw_context);
5818 return;
5819 }
5820
5821 /* tell the software that a halt has begun */
5822 sc_stop(sc, SCF_HALTED);
5823
5824 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5825
5826 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5827 send_context_err_status_string(flags, sizeof(flags), status));
5828
5829 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5830 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5831
5832 /*
5833 * Automatically restart halted kernel contexts out of interrupt
5834 * context. User contexts must ask the driver to restart the context.
5835 */
5836 if (sc->type != SC_USER)
5837 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5838
5839 /*
5840 * Update the counters for the corresponding status bits.
5841 * Note that these particular counters are aggregated over all
5842 * 160 contexts.
5843 */
5844 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5845 if (status & (1ull << i))
5846 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5847 }
5848}
5849
5850static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5851 unsigned int source, u64 status)
5852{
5853 struct sdma_engine *sde;
5854 int i = 0;
5855
5856 sde = &dd->per_sdma[source];
5857#ifdef CONFIG_SDMA_VERBOSITY
5858 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5859 slashstrip(__FILE__), __LINE__, __func__);
5860 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5861 sde->this_idx, source, (unsigned long long)status);
5862#endif
5863 sde->err_cnt++;
5864 sdma_engine_error(sde, status);
5865
5866 /*
5867 * Update the counters for the corresponding status bits.
5868 * Note that these particular counters are aggregated over
5869 * all 16 DMA engines.
5870 */
5871 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5872 if (status & (1ull << i))
5873 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5874 }
5875}
5876
5877/*
5878 * CCE block SDMA error interrupt. Source is < 16.
5879 */
5880static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5881{
5882#ifdef CONFIG_SDMA_VERBOSITY
5883 struct sdma_engine *sde = &dd->per_sdma[source];
5884
5885 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5886 slashstrip(__FILE__), __LINE__, __func__);
5887 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5888 source);
5889 sdma_dumpstate(sde);
5890#endif
5891 interrupt_clear_down(dd, source, &sdma_eng_err);
5892}
5893
5894/*
5895 * CCE block "various" interrupt. Source is < 8.
5896 */
5897static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5898{
5899 const struct err_reg_info *eri = &various_err[source];
5900
5901 /*
5902 * TCritInt cannot go through interrupt_clear_down()
5903 * because it is not a second tier interrupt. The handler
5904 * should be called directly.
5905 */
5906 if (source == TCRIT_INT_SOURCE)
5907 handle_temp_err(dd);
5908 else if (eri->handler)
5909 interrupt_clear_down(dd, 0, eri);
5910 else
5911 dd_dev_info(dd,
5912 "%s: Unimplemented/reserved interrupt %d\n",
5913 __func__, source);
5914}
5915
5916static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5917{
5918 /* src_ctx is always zero */
5919 struct hfi1_pportdata *ppd = dd->pport;
5920 unsigned long flags;
5921 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5922
5923 if (reg & QSFP_HFI0_MODPRST_N) {
5924
5925 dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
5926 __func__);
5927
5928 if (!qsfp_mod_present(ppd)) {
5929 ppd->driver_link_ready = 0;
5930 /*
5931 * Cable removed, reset all our information about the
5932 * cache and cable capabilities
5933 */
5934
5935 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5936 /*
5937 * We don't set cache_refresh_required here as we expect
5938 * an interrupt when a cable is inserted
5939 */
5940 ppd->qsfp_info.cache_valid = 0;
5941 ppd->qsfp_info.reset_needed = 0;
5942 ppd->qsfp_info.limiting_active = 0;
5943 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5944 flags);
5945 /* Invert the ModPresent pin now to detect plug-in */
5946 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5947 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5948
5949 if ((ppd->offline_disabled_reason >
5950 HFI1_ODR_MASK(
5951 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED)) ||
5952 (ppd->offline_disabled_reason ==
5953 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5954 ppd->offline_disabled_reason =
5955 HFI1_ODR_MASK(
5956 OPA_LINKDOWN_REASONLOCAL_MEDIA_NOT_INSTALLED);
5957
5958 if (ppd->host_link_state == HLS_DN_POLL) {
5959 /*
5960 * The link is still in POLL. This means
5961 * that the normal link down processing
5962 * will not happen. We have to do it here
5963 * before turning the DC off.
5964 */
5965 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5966 }
5967 } else {
5968 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5969 ppd->qsfp_info.cache_valid = 0;
5970 ppd->qsfp_info.cache_refresh_required = 1;
5971 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5972 flags);
5973
5974 /*
5975 * Stop inversion of ModPresent pin to detect
5976 * removal of the cable
5977 */
5978 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
5979 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5980 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5981
5982 ppd->offline_disabled_reason =
5983 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
5984 }
5985 }
5986
5987 if (reg & QSFP_HFI0_INT_N) {
5988
5989 dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
5990 __func__);
5991 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5992 ppd->qsfp_info.check_interrupt_flags = 1;
5993 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
5994 }
5995
5996 /* Schedule the QSFP work only if there is a cable attached. */
5997 if (qsfp_mod_present(ppd))
5998 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
5999}
6000
6001static int request_host_lcb_access(struct hfi1_devdata *dd)
6002{
6003 int ret;
6004
6005 ret = do_8051_command(dd, HCMD_MISC,
6006 (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6007 NULL);
6008 if (ret != HCMD_SUCCESS) {
6009 dd_dev_err(dd, "%s: command failed with error %d\n",
6010 __func__, ret);
6011 }
6012 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6013}
6014
6015static int request_8051_lcb_access(struct hfi1_devdata *dd)
6016{
6017 int ret;
6018
6019 ret = do_8051_command(dd, HCMD_MISC,
6020 (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
6021 NULL);
6022 if (ret != HCMD_SUCCESS) {
6023 dd_dev_err(dd, "%s: command failed with error %d\n",
6024 __func__, ret);
6025 }
6026 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6027}
6028
6029/*
6030 * Set the LCB selector - allow host access. The DCC selector always
6031 * points to the host.
6032 */
6033static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6034{
6035 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6036 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
6037 | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6038}
6039
6040/*
6041 * Clear the LCB selector - allow 8051 access. The DCC selector always
6042 * points to the host.
6043 */
6044static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6045{
6046 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6047 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6048}
6049
6050/*
6051 * Acquire LCB access from the 8051. If the host already has access,
6052 * just increment a counter. Otherwise, inform the 8051 that the
6053 * host is taking access.
6054 *
6055 * Returns:
6056 * 0 on success
6057 * -EBUSY if the 8051 has control and cannot be disturbed
6058 * -errno if unable to acquire access from the 8051
6059 */
6060int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6061{
6062 struct hfi1_pportdata *ppd = dd->pport;
6063 int ret = 0;
6064
6065 /*
6066 * Use the host link state lock so the operation of this routine
6067 * { link state check, selector change, count increment } can occur
6068 * as a unit against a link state change. Otherwise there is a
6069 * race between the state change and the count increment.
6070 */
6071 if (sleep_ok) {
6072 mutex_lock(&ppd->hls_lock);
6073 } else {
6074 while (!mutex_trylock(&ppd->hls_lock))
6075 udelay(1);
6076 }
6077
6078 /* this access is valid only when the link is up */
6079 if ((ppd->host_link_state & HLS_UP) == 0) {
6080 dd_dev_info(dd, "%s: link state %s not up\n",
6081 __func__, link_state_name(ppd->host_link_state));
6082 ret = -EBUSY;
6083 goto done;
6084 }
6085
6086 if (dd->lcb_access_count == 0) {
6087 ret = request_host_lcb_access(dd);
6088 if (ret) {
6089 dd_dev_err(dd,
6090 "%s: unable to acquire LCB access, err %d\n",
6091 __func__, ret);
6092 goto done;
6093 }
6094 set_host_lcb_access(dd);
6095 }
6096 dd->lcb_access_count++;
6097done:
6098 mutex_unlock(&ppd->hls_lock);
6099 return ret;
6100}
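/*
 * Typical usage sketch (illustrative only, not taken from this file),
 * for a caller that is allowed to sleep:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		... read or write LCB CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * Callers that cannot sleep pass sleep_ok == 0, in which case the
 * routine spins on the host link state mutex instead of blocking.
 */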
6101
6102/*
6103 * Release LCB access by decrementing the use count. If the count is moving
6104 * from 1 to 0, inform 8051 that it has control back.
6105 *
6106 * Returns:
6107 * 0 on success
6108 * -errno if unable to release access to the 8051
6109 */
6110int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6111{
6112 int ret = 0;
6113
6114 /*
6115 * Use the host link state lock because the acquire needed it.
6116 * Here, we only need to keep { selector change, count decrement }
6117 * as a unit.
6118 */
6119 if (sleep_ok) {
6120 mutex_lock(&dd->pport->hls_lock);
6121 } else {
6122 while (!mutex_trylock(&dd->pport->hls_lock))
6123 udelay(1);
6124 }
6125
6126 if (dd->lcb_access_count == 0) {
6127 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6128 __func__);
6129 goto done;
6130 }
6131
6132 if (dd->lcb_access_count == 1) {
6133 set_8051_lcb_access(dd);
6134 ret = request_8051_lcb_access(dd);
6135 if (ret) {
6136 dd_dev_err(dd,
6137 "%s: unable to release LCB access, err %d\n",
6138 __func__, ret);
6139 /* restore host access if the grant didn't work */
6140 set_host_lcb_access(dd);
6141 goto done;
6142 }
6143 }
6144 dd->lcb_access_count--;
6145done:
6146 mutex_unlock(&dd->pport->hls_lock);
6147 return ret;
6148}
6149
6150/*
6151 * Initialize LCB access variables and state. Called during driver load,
6152 * after most of the initialization is finished.
6153 *
6154 * The DC default is LCB access on for the host. The driver defaults to
6155 * leaving access to the 8051. Assign access now - this constrains the call
6156 * to this routine to be after all LCB set-up is done. In particular, after
6157 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6158 */
6159static void init_lcb_access(struct hfi1_devdata *dd)
6160{
6161 dd->lcb_access_count = 0;
6162}
6163
6164/*
6165 * Write a response back to a 8051 request.
6166 */
6167static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6168{
6169 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6170 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
6171 | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
6172 | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6173}
6174
6175/*
6176 * Handle host requests from the 8051.
6177 *
6178 * This is a work-queue function outside of the interrupt.
6179 */
6180void handle_8051_request(struct work_struct *work)
6181{
6182 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6183 dc_host_req_work);
6184 struct hfi1_devdata *dd = ppd->dd;
6185 u64 reg;
6186 u16 data = 0;
6187 u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6188 u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
6189
6190 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6191 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6192 return; /* no request */
6193
6194 /* zero out COMPLETED so the response is seen */
6195 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6196
6197 /* extract request details */
6198 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6199 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6200 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6201 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6202
6203 switch (type) {
6204 case HREQ_LOAD_CONFIG:
6205 case HREQ_SAVE_CONFIG:
6206 case HREQ_READ_CONFIG:
6207 case HREQ_SET_TX_EQ_ABS:
6208 case HREQ_SET_TX_EQ_REL:
6209 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6210 type);
6211 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6212 break;
6213
Easwar Hariharancbac3862016-02-03 14:31:31 -08006214 case HREQ_ENABLE:
6215 lanes = data & 0xF;
6216 for (i = 0; lanes; lanes >>= 1, i++) {
6217 if (!(lanes & 1))
6218 continue;
6219 if (data & 0x200) {
6220 /* enable TX CDR */
6221 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6222 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6223 cdr_ctrl_byte |= (1 << (i + 4));
6224 } else {
6225 /* disable TX CDR */
6226 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6227 cache[QSFP_CDR_INFO_OFFS] & 0x80)
6228 cdr_ctrl_byte &= ~(1 << (i + 4));
6229 }
6230
6231 if (data & 0x800) {
6232 /* enable RX CDR */
6233 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6234 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6235 cdr_ctrl_byte |= (1 << i);
6236 } else {
6237 /* disable RX CDR */
6238 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6239 cache[QSFP_CDR_INFO_OFFS] & 0x40)
6240 cdr_ctrl_byte &= ~(1 << i);
6241 }
6242 }
6243 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6244 &cdr_ctrl_byte, 1);
6245 hreq_response(dd, HREQ_SUCCESS, data);
6246 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6247 break;
6248
Mike Marciniszyn77241052015-07-30 15:17:43 -04006249 case HREQ_CONFIG_DONE:
6250 hreq_response(dd, HREQ_SUCCESS, 0);
6251 break;
6252
6253 case HREQ_INTERFACE_TEST:
6254 hreq_response(dd, HREQ_SUCCESS, data);
6255 break;
6256
6257 default:
6258 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6259 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6260 break;
6261 }
6262}
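/*
 * Illustrative note (inferred from the decode in handle_8051_request()
 * above, not from an 8051 interface spec): for HREQ_ENABLE the request
 * data carries a lane mask in bits [3:0], with bit 9 (0x200) selecting
 * TX CDR on/off and bit 11 (0x800) selecting RX CDR on/off.  For
 * example, data = 0xA05 would request TX and RX CDR enable on lanes 0
 * and 2, provided the cached QSFP info reports CDR support.
 */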
6263
6264static void write_global_credit(struct hfi1_devdata *dd,
6265 u8 vau, u16 total, u16 shared)
6266{
6267 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6268 ((u64)total
6269 << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
6270 | ((u64)shared
6271 << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
6272 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6273}
6274
6275/*
6276 * Set up initial VL15 credits of the remote. Assumes the rest of
6277 * the CM credit registers are zero from a previous global or credit reset.
6278 */
6279void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6280{
6281 /* leave shared count at zero for both global and VL15 */
6282 write_global_credit(dd, vau, vl15buf, 0);
6283
6284 /* We may need some credits for another VL when sending packets
6285 * with the snoop interface. Dividing it down the middle for VL15
6286 * and VL0 should suffice.
6287 */
6288 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6289 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6290 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6291 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6292 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6293 } else {
6294 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6295 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6296 }
6297}
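/*
 * Worked example (illustrative only): with vl15buf = 0x40 and snoop
 * mode active, the writes above dedicate 0x20 credits to VL15 and 0x20
 * to VL0; without snoop mode, all 0x40 credits are dedicated to VL15.
 */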
6298
6299/*
6300 * Zero all credit details from the previous connection and
6301 * reset the CM manager's internal counters.
6302 */
6303void reset_link_credits(struct hfi1_devdata *dd)
6304{
6305 int i;
6306
6307 /* remove all previous VL credit limits */
6308 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6309 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
6310 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6311 write_global_credit(dd, 0, 0, 0);
6312 /* reset the CM block */
6313 pio_send_control(dd, PSC_CM_RESET);
6314}
6315
6316/* convert a vCU to a CU */
6317static u32 vcu_to_cu(u8 vcu)
6318{
6319 return 1 << vcu;
6320}
6321
6322/* convert a CU to a vCU */
6323static u8 cu_to_vcu(u32 cu)
6324{
6325 return ilog2(cu);
6326}
6327
6328/* convert a vAU to an AU */
6329static u32 vau_to_au(u8 vau)
6330{
6331 return 8 * (1 << vau);
6332}
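/*
 * Quick reference for the exponent encodings above (illustrative):
 * vau_to_au(3) = 8 * (1 << 3) = 64 bytes, vcu_to_cu(1) = 2, and
 * cu_to_vcu(4) = ilog2(4) = 2.
 */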
6333
6334static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6335{
6336 ppd->sm_trap_qp = 0x0;
6337 ppd->sa_qp = 0x1;
6338}
6339
6340/*
6341 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6342 */
6343static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6344{
6345 u64 reg;
6346
6347 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6348 write_csr(dd, DC_LCB_CFG_RUN, 0);
6349 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6350 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6351 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6352 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6353 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6354 reg = read_csr(dd, DCC_CFG_RESET);
6355 write_csr(dd, DCC_CFG_RESET,
6356 reg
6357 | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
6358 | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6359 (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6360 if (!abort) {
6361 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6362 write_csr(dd, DCC_CFG_RESET, reg);
6363 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6364 }
6365}
6366
6367/*
6368 * This routine should be called after the link has been transitioned to
6369 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6370 * reset).
6371 *
6372 * The expectation is that the caller of this routine would have taken
6373 * care of properly transitioning the link into the correct state.
6374 */
6375static void dc_shutdown(struct hfi1_devdata *dd)
6376{
6377 unsigned long flags;
6378
6379 spin_lock_irqsave(&dd->dc8051_lock, flags);
6380 if (dd->dc_shutdown) {
6381 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6382 return;
6383 }
6384 dd->dc_shutdown = 1;
6385 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6386 /* Shutdown the LCB */
6387 lcb_shutdown(dd, 1);
6388	/* Going to OFFLINE would have caused the 8051 to put the
6389	 * SerDes into reset already. Just need to shut down the
6390	 * 8051 itself. */
6391 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6392}
6393
6394/* Calling this after the DC has been brought out of reset should not
6395 * do any damage. */
6396static void dc_start(struct hfi1_devdata *dd)
6397{
6398 unsigned long flags;
6399 int ret;
6400
6401 spin_lock_irqsave(&dd->dc8051_lock, flags);
6402 if (!dd->dc_shutdown)
6403 goto done;
6404 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6405 /* Take the 8051 out of reset */
6406 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6407 /* Wait until 8051 is ready */
6408 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6409 if (ret) {
6410 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6411 __func__);
6412 }
6413 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6414 write_csr(dd, DCC_CFG_RESET, 0x10);
6415 /* lcb_shutdown() with abort=1 does not restore these */
6416 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6417 spin_lock_irqsave(&dd->dc8051_lock, flags);
6418 dd->dc_shutdown = 0;
6419done:
6420 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6421}
6422
6423/*
6424 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6425 */
6426static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6427{
6428 u64 rx_radr, tx_radr;
6429 u32 version;
6430
6431 if (dd->icode != ICODE_FPGA_EMULATION)
6432 return;
6433
6434 /*
6435 * These LCB defaults on emulator _s are good, nothing to do here:
6436 * LCB_CFG_TX_FIFOS_RADR
6437 * LCB_CFG_RX_FIFOS_RADR
6438 * LCB_CFG_LN_DCLK
6439 * LCB_CFG_IGNORE_LOST_RCLK
6440 */
6441 if (is_emulator_s(dd))
6442 return;
6443 /* else this is _p */
6444
6445 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006446 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006447 version = 0x2d; /* all B0 use 0x2d or higher settings */
6448
6449 if (version <= 0x12) {
6450 /* release 0x12 and below */
6451
6452 /*
6453 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6454 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6455 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6456 */
6457 rx_radr =
6458 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6459 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6460 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6461 /*
6462 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6463 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6464 */
6465 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6466 } else if (version <= 0x18) {
6467 /* release 0x13 up to 0x18 */
6468 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6469 rx_radr =
6470 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6474 } else if (version == 0x19) {
6475 /* release 0x19 */
6476 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6477 rx_radr =
6478 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6479 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6480 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6481 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6482 } else if (version == 0x1a) {
6483 /* release 0x1a */
6484 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6485 rx_radr =
6486 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6487 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6488 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6489 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6490 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6491 } else {
6492 /* release 0x1b and higher */
6493 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6494 rx_radr =
6495 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6496 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6497 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6498 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6499 }
6500
6501 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6502 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6503 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6504 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6505 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6506}
6507
6508/*
6509 * Handle a SMA idle message
6510 *
6511 * This is a work-queue function outside of the interrupt.
6512 */
6513void handle_sma_message(struct work_struct *work)
6514{
6515 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6516 sma_message_work);
6517 struct hfi1_devdata *dd = ppd->dd;
6518 u64 msg;
6519 int ret;
6520
6521	/* msg is bytes 1-4 of the 40-bit idle message - the command code
6522	 * is stripped off */
6523 ret = read_idle_sma(dd, &msg);
6524 if (ret)
6525 return;
6526 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6527 /*
6528 * React to the SMA message. Byte[1] (0 for us) is the command.
6529 */
6530 switch (msg & 0xff) {
6531 case SMA_IDLE_ARM:
6532 /*
6533 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6534 * State Transitions
6535 *
6536 * Only expected in INIT or ARMED, discard otherwise.
6537 */
6538 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6539 ppd->neighbor_normal = 1;
6540 break;
6541 case SMA_IDLE_ACTIVE:
6542 /*
6543 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6544 * State Transitions
6545 *
6546 * Can activate the node. Discard otherwise.
6547 */
6548 if (ppd->host_link_state == HLS_UP_ARMED
6549 && ppd->is_active_optimize_enabled) {
6550 ppd->neighbor_normal = 1;
6551 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6552 if (ret)
6553 dd_dev_err(
6554 dd,
6555 "%s: received Active SMA idle message, couldn't set link to Active\n",
6556 __func__);
6557 }
6558 break;
6559 default:
6560 dd_dev_err(dd,
6561 "%s: received unexpected SMA idle message 0x%llx\n",
6562 __func__, msg);
6563 break;
6564 }
6565}
6566
6567static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6568{
6569 u64 rcvctrl;
6570 unsigned long flags;
6571
6572 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6573 rcvctrl = read_csr(dd, RCV_CTRL);
6574 rcvctrl |= add;
6575 rcvctrl &= ~clear;
6576 write_csr(dd, RCV_CTRL, rcvctrl);
6577 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6578}
6579
6580static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6581{
6582 adjust_rcvctrl(dd, add, 0);
6583}
6584
6585static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6586{
6587 adjust_rcvctrl(dd, 0, clear);
6588}
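/*
 * Usage sketch: callers pass RCV_CTRL bit masks, e.g.
 * add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) enables the port and
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) disables it, each
 * as a read-modify-write of RCV_CTRL under rcvctrl_lock.
 */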
6589
6590/*
6591 * Called from all interrupt handlers to start handling an SPC freeze.
6592 */
6593void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6594{
6595 struct hfi1_devdata *dd = ppd->dd;
6596 struct send_context *sc;
6597 int i;
6598
6599 if (flags & FREEZE_SELF)
6600 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6601
6602 /* enter frozen mode */
6603 dd->flags |= HFI1_FROZEN;
6604
6605 /* notify all SDMA engines that they are going into a freeze */
6606 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6607
6608 /* do halt pre-handling on all enabled send contexts */
6609 for (i = 0; i < dd->num_send_contexts; i++) {
6610 sc = dd->send_contexts[i].sc;
6611 if (sc && (sc->flags & SCF_ENABLED))
6612 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6613 }
6614
6615	/* Send contexts are frozen. Notify user space */
6616 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6617
6618 if (flags & FREEZE_ABORT) {
6619 dd_dev_err(dd,
6620 "Aborted freeze recovery. Please REBOOT system\n");
6621 return;
6622 }
6623 /* queue non-interrupt handler */
6624 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6625}
6626
6627/*
6628 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6629 * depending on the "freeze" parameter.
6630 *
6631 * No need to return an error if it times out; our only option
6632 * is to proceed anyway.
6633 */
6634static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6635{
6636 unsigned long timeout;
6637 u64 reg;
6638
6639 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6640 while (1) {
6641 reg = read_csr(dd, CCE_STATUS);
6642 if (freeze) {
6643 /* waiting until all indicators are set */
6644 if ((reg & ALL_FROZE) == ALL_FROZE)
6645 return; /* all done */
6646 } else {
6647 /* waiting until all indicators are clear */
6648 if ((reg & ALL_FROZE) == 0)
6649 return; /* all done */
6650 }
6651
6652 if (time_after(jiffies, timeout)) {
6653 dd_dev_err(dd,
6654 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6655 freeze ? "" : "un",
6656 reg & ALL_FROZE,
6657 freeze ? ALL_FROZE : 0ull);
6658 return;
6659 }
6660 usleep_range(80, 120);
6661 }
6662}
6663
6664/*
6665 * Do all freeze handling for the RXE block.
6666 */
6667static void rxe_freeze(struct hfi1_devdata *dd)
6668{
6669 int i;
6670
6671 /* disable port */
6672 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6673
6674 /* disable all receive contexts */
6675 for (i = 0; i < dd->num_rcv_contexts; i++)
6676 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6677}
6678
6679/*
6680 * Unfreeze handling for the RXE block - kernel contexts only.
6681 * This will also enable the port. User contexts will do unfreeze
6682 * handling on a per-context basis as they call into the driver.
6683 *
6684 */
6685static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6686{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006687 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006688 int i;
6689
6690 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006691 for (i = 0; i < dd->n_krcv_queues; i++) {
6692 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6693 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6694 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6695 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6696 hfi1_rcvctrl(dd, rcvmask, i);
6697 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006698
6699 /* enable port */
6700 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6701}
6702
6703/*
6704 * Non-interrupt SPC freeze handling.
6705 *
6706 * This is a work-queue function outside of the triggering interrupt.
6707 */
6708void handle_freeze(struct work_struct *work)
6709{
6710 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6711 freeze_work);
6712 struct hfi1_devdata *dd = ppd->dd;
6713
6714 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006715 wait_for_freeze_status(dd, 1);
6716
6717 /* SPC is now frozen */
6718
6719 /* do send PIO freeze steps */
6720 pio_freeze(dd);
6721
6722 /* do send DMA freeze steps */
6723 sdma_freeze(dd);
6724
6725 /* do send egress freeze steps - nothing to do */
6726
6727 /* do receive freeze steps */
6728 rxe_freeze(dd);
6729
6730 /*
6731 * Unfreeze the hardware - clear the freeze, wait for each
6732 * block's frozen bit to clear, then clear the frozen flag.
6733 */
6734 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6735 wait_for_freeze_status(dd, 0);
6736
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006737 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006738 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6739 wait_for_freeze_status(dd, 1);
6740 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6741 wait_for_freeze_status(dd, 0);
6742 }
6743
6744 /* do send PIO unfreeze steps for kernel contexts */
6745 pio_kernel_unfreeze(dd);
6746
6747 /* do send DMA unfreeze steps */
6748 sdma_unfreeze(dd);
6749
6750 /* do send egress unfreeze steps - nothing to do */
6751
6752 /* do receive unfreeze steps for kernel contexts */
6753 rxe_kernel_unfreeze(dd);
6754
6755 /*
6756 * The unfreeze procedure touches global device registers when
6757 * it disables and re-enables RXE. Mark the device unfrozen
6758 * after all that is done so other parts of the driver waiting
6759 * for the device to unfreeze don't do things out of order.
6760 *
6761 * The above implies that the meaning of HFI1_FROZEN flag is
6762 * "Device has gone into freeze mode and freeze mode handling
6763 * is still in progress."
6764 *
6765 * The flag will be removed when freeze mode processing has
6766 * completed.
6767 */
6768 dd->flags &= ~HFI1_FROZEN;
6769 wake_up(&dd->event_queue);
6770
6771 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006772}
6773
6774/*
6775 * Handle a link up interrupt from the 8051.
6776 *
6777 * This is a work-queue function outside of the interrupt.
6778 */
6779void handle_link_up(struct work_struct *work)
6780{
6781 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6782 link_up_work);
6783 set_link_state(ppd, HLS_UP_INIT);
6784
6785 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6786 read_ltp_rtt(ppd->dd);
6787 /*
6788 * OPA specifies that certain counters are cleared on a transition
6789 * to link up, so do that.
6790 */
6791 clear_linkup_counters(ppd->dd);
6792 /*
6793 * And (re)set link up default values.
6794 */
6795 set_linkup_defaults(ppd);
6796
6797 /* enforce link speed enabled */
6798 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6799 /* oops - current speed is not enabled, bounce */
6800 dd_dev_err(ppd->dd,
6801 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6802 ppd->link_speed_active, ppd->link_speed_enabled);
6803 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6804 OPA_LINKDOWN_REASON_SPEED_POLICY);
6805 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006806 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006807 start_link(ppd);
6808 }
6809}
6810
6811/* Several pieces of LNI information were cached for SMA in ppd.
6812 * Reset these on link down */
6813static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6814{
6815 ppd->neighbor_guid = 0;
6816 ppd->neighbor_port_number = 0;
6817 ppd->neighbor_type = 0;
6818 ppd->neighbor_fm_security = 0;
6819}
6820
6821/*
6822 * Handle a link down interrupt from the 8051.
6823 *
6824 * This is a work-queue function outside of the interrupt.
6825 */
6826void handle_link_down(struct work_struct *work)
6827{
6828 u8 lcl_reason, neigh_reason = 0;
6829 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6830 link_down_work);
6831
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006832 if ((ppd->host_link_state &
6833 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6834 ppd->port_type == PORT_TYPE_FIXED)
6835 ppd->offline_disabled_reason =
6836 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6837
6838 /* Go offline first, then deal with reading/writing through 8051 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006839 set_link_state(ppd, HLS_DN_OFFLINE);
6840
6841 lcl_reason = 0;
6842 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6843
6844 /*
6845 * If no reason, assume peer-initiated but missed
6846 * LinkGoingDown idle flits.
6847 */
6848 if (neigh_reason == 0)
6849 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6850
6851 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6852
6853 reset_neighbor_info(ppd);
6854
6855 /* disable the port */
6856 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6857
6858 /* If there is no cable attached, turn the DC off. Otherwise,
6859	 * start the link bring-up. */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006860 if (!qsfp_mod_present(ppd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006861 dc_shutdown(ppd->dd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006862 } else {
6863 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006864 start_link(ppd);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006865 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006866}
6867
6868void handle_link_bounce(struct work_struct *work)
6869{
6870 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6871 link_bounce_work);
6872
6873 /*
6874 * Only do something if the link is currently up.
6875 */
6876 if (ppd->host_link_state & HLS_UP) {
6877 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006878 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006879 start_link(ppd);
6880 } else {
6881 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6882 __func__, link_state_name(ppd->host_link_state));
6883 }
6884}
6885
6886/*
6887 * Mask conversion: Capability exchange to Port LTP. The capability
6888 * exchange has an implicit 16b CRC that is mandatory.
6889 */
6890static int cap_to_port_ltp(int cap)
6891{
6892 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6893
6894 if (cap & CAP_CRC_14B)
6895 port_ltp |= PORT_LTP_CRC_MODE_14;
6896 if (cap & CAP_CRC_48B)
6897 port_ltp |= PORT_LTP_CRC_MODE_48;
6898 if (cap & CAP_CRC_12B_16B_PER_LANE)
6899 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6900
6901 return port_ltp;
6902}
6903
6904/*
6905 * Convert an OPA Port LTP mask to capability mask
6906 */
6907int port_ltp_to_cap(int port_ltp)
6908{
6909 int cap_mask = 0;
6910
6911 if (port_ltp & PORT_LTP_CRC_MODE_14)
6912 cap_mask |= CAP_CRC_14B;
6913 if (port_ltp & PORT_LTP_CRC_MODE_48)
6914 cap_mask |= CAP_CRC_48B;
6915 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6916 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6917
6918 return cap_mask;
6919}
6920
6921/*
6922 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6923 */
6924static int lcb_to_port_ltp(int lcb_crc)
6925{
6926 int port_ltp = 0;
6927
6928 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6929 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6930 else if (lcb_crc == LCB_CRC_48B)
6931 port_ltp = PORT_LTP_CRC_MODE_48;
6932 else if (lcb_crc == LCB_CRC_14B)
6933 port_ltp = PORT_LTP_CRC_MODE_14;
6934 else
6935 port_ltp = PORT_LTP_CRC_MODE_16;
6936
6937 return port_ltp;
6938}
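/*
 * Example (illustrative): a negotiated crc_val of LCB_CRC_14B maps to
 * PORT_LTP_CRC_MODE_14 here, the same bit that cap_to_port_ltp() adds
 * when CAP_CRC_14B is present.
 */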
6939
6940/*
6941 * Our neighbor has indicated that we are allowed to act as a fabric
6942 * manager, so place the full management partition key in the second
6943 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6944 * that we should already have the limited management partition key in
6945 * array element 1, and also that the port is not yet up when
6946 * add_full_mgmt_pkey() is invoked.
6947 */
6948static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6949{
6950 struct hfi1_devdata *dd = ppd->dd;
6951
Dean Luick87645222015-12-01 15:38:21 -05006952	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6953 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6954 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6955 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006956 ppd->pkeys[2] = FULL_MGMT_P_KEY;
6957 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6958}
6959
6960/*
6961 * Convert the given link width to the OPA link width bitmask.
6962 */
6963static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6964{
6965 switch (width) {
6966 case 0:
6967 /*
6968 * Simulator and quick linkup do not set the width.
6969 * Just set it to 4x without complaint.
6970 */
6971 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
6972 return OPA_LINK_WIDTH_4X;
6973 return 0; /* no lanes up */
6974 case 1: return OPA_LINK_WIDTH_1X;
6975 case 2: return OPA_LINK_WIDTH_2X;
6976 case 3: return OPA_LINK_WIDTH_3X;
6977 default:
6978 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
6979 __func__, width);
6980 /* fall through */
6981 case 4: return OPA_LINK_WIDTH_4X;
6982 }
6983}
6984
6985/*
6986 * Do a population count on the bottom nibble.
6987 */
6988static const u8 bit_counts[16] = {
6989 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
6990};
6991static inline u8 nibble_to_count(u8 nibble)
6992{
6993 return bit_counts[nibble & 0xf];
6994}
6995
6996/*
6997 * Read the active lane information from the 8051 registers and return
6998 * their widths.
6999 *
7000 * Active lane information is found in these 8051 registers:
7001 * enable_lane_tx
7002 * enable_lane_rx
7003 */
7004static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7005 u16 *rx_width)
7006{
7007 u16 tx, rx;
7008 u8 enable_lane_rx;
7009 u8 enable_lane_tx;
7010 u8 tx_polarity_inversion;
7011 u8 rx_polarity_inversion;
7012 u8 max_rate;
7013
7014 /* read the active lanes */
7015 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7016 &rx_polarity_inversion, &max_rate);
7017 read_local_lni(dd, &enable_lane_rx);
7018
7019 /* convert to counts */
7020 tx = nibble_to_count(enable_lane_tx);
7021 rx = nibble_to_count(enable_lane_rx);
7022
7023 /*
7024 * Set link_speed_active here, overriding what was set in
7025 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7026 * set the max_rate field in handle_verify_cap until v0.19.
7027 */
7028 if ((dd->icode == ICODE_RTL_SILICON)
7029 && (dd->dc8051_ver < dc8051_ver(0, 19))) {
7030 /* max_rate: 0 = 12.5G, 1 = 25G */
7031 switch (max_rate) {
7032 case 0:
7033 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7034 break;
7035 default:
7036 dd_dev_err(dd,
7037 "%s: unexpected max rate %d, using 25Gb\n",
7038 __func__, (int)max_rate);
7039 /* fall through */
7040 case 1:
7041 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7042 break;
7043 }
7044 }
7045
7046 dd_dev_info(dd,
7047 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7048 enable_lane_tx, tx, enable_lane_rx, rx);
7049 *tx_width = link_width_to_bits(dd, tx);
7050 *rx_width = link_width_to_bits(dd, rx);
7051}
7052
7053/*
7054 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7055 * Valid after the end of VerifyCap and during LinkUp. Does not change
7056 * after link up. I.e. look elsewhere for downgrade information.
7057 *
7058 * Bits are:
7059 * + bits [7:4] contain the number of active transmitters
7060 * + bits [3:0] contain the number of active receivers
7061 * These are numbers 1 through 4 and can be different values if the
7062 * link is asymmetric.
7063 *
7064 * verify_cap_local_fm_link_width[0] retains its original value.
7065 */
7066static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7067 u16 *rx_width)
7068{
7069 u16 widths, tx, rx;
7070 u8 misc_bits, local_flags;
7071 u16 active_tx, active_rx;
7072
7073 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7074 tx = widths >> 12;
7075 rx = (widths >> 8) & 0xf;
7076
7077 *tx_width = link_width_to_bits(dd, tx);
7078 *rx_width = link_width_to_bits(dd, rx);
7079
7080 /* print the active widths */
7081 get_link_widths(dd, &active_tx, &active_rx);
7082}
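/*
 * Example decode (illustrative): widths = 0x4400 yields tx = 4 and
 * rx = 4, which link_width_to_bits() reports as OPA_LINK_WIDTH_4X in
 * both directions.
 */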
7083
7084/*
7085 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7086 * hardware information when the link first comes up.
7087 *
7088 * The link width is not available until after VerifyCap.AllFramesReceived
7089 * (the trigger for handle_verify_cap), so this is outside that routine
7090 * and should be called when the 8051 signals linkup.
7091 */
7092void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7093{
7094 u16 tx_width, rx_width;
7095
7096 /* get end-of-LNI link widths */
7097 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7098
7099 /* use tx_width as the link is supposed to be symmetric on link up */
7100 ppd->link_width_active = tx_width;
7101 /* link width downgrade active (LWD.A) starts out matching LW.A */
7102 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7103 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7104 /* per OPA spec, on link up LWD.E resets to LWD.S */
7105 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7106	/* cache the active egress rate (units [10^6 bits/sec]) */
7107 ppd->current_egress_rate = active_egress_rate(ppd);
7108}
7109
7110/*
7111 * Handle a verify capabilities interrupt from the 8051.
7112 *
7113 * This is a work-queue function outside of the interrupt.
7114 */
7115void handle_verify_cap(struct work_struct *work)
7116{
7117 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7118 link_vc_work);
7119 struct hfi1_devdata *dd = ppd->dd;
7120 u64 reg;
7121 u8 power_management;
7122	u8 continuous;
7123 u8 vcu;
7124 u8 vau;
7125 u8 z;
7126 u16 vl15buf;
7127 u16 link_widths;
7128 u16 crc_mask;
7129 u16 crc_val;
7130 u16 device_id;
7131 u16 active_tx, active_rx;
7132 u8 partner_supported_crc;
7133 u8 remote_tx_rate;
7134 u8 device_rev;
7135
7136 set_link_state(ppd, HLS_VERIFY_CAP);
7137
7138 lcb_shutdown(dd, 0);
7139 adjust_lcb_for_fpga_serdes(dd);
7140
7141 /*
7142 * These are now valid:
7143 * remote VerifyCap fields in the general LNI config
7144 * CSR DC8051_STS_REMOTE_GUID
7145 * CSR DC8051_STS_REMOTE_NODE_TYPE
7146 * CSR DC8051_STS_REMOTE_FM_SECURITY
7147 * CSR DC8051_STS_REMOTE_PORT_NO
7148 */
7149
7150	read_vc_remote_phy(dd, &power_management, &continuous);
7151 read_vc_remote_fabric(
7152 dd,
7153 &vau,
7154 &z,
7155 &vcu,
7156 &vl15buf,
7157 &partner_supported_crc);
7158 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7159 read_remote_device_id(dd, &device_id, &device_rev);
7160 /*
7161 * And the 'MgmtAllowed' information, which is exchanged during
7162	 * LNI, is also available at this point.
7163 */
7164 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7165 /* print the active widths */
7166 get_link_widths(dd, &active_tx, &active_rx);
7167 dd_dev_info(dd,
7168 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7169		    (int)power_management, (int)continuous);
7170 dd_dev_info(dd,
7171 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7172 (int)vau,
7173 (int)z,
7174 (int)vcu,
7175 (int)vl15buf,
7176 (int)partner_supported_crc);
7177 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7178 (u32)remote_tx_rate, (u32)link_widths);
7179 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7180 (u32)device_id, (u32)device_rev);
7181 /*
7182 * The peer vAU value just read is the peer receiver value. HFI does
7183 * not support a transmit vAU of 0 (AU == 8). We advertised that
7184 * with Z=1 in the fabric capabilities sent to the peer. The peer
7185 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7186 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7187 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7188 * subject to the Z value exception.
7189 */
7190 if (vau == 0)
7191 vau = 1;
7192 set_up_vl15(dd, vau, vl15buf);
7193
7194 /* set up the LCB CRC mode */
7195 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7196
7197 /* order is important: use the lowest bit in common */
7198 if (crc_mask & CAP_CRC_14B)
7199 crc_val = LCB_CRC_14B;
7200 else if (crc_mask & CAP_CRC_48B)
7201 crc_val = LCB_CRC_48B;
7202 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7203 crc_val = LCB_CRC_12B_16B_PER_LANE;
7204 else
7205 crc_val = LCB_CRC_16B;
7206
7207 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7208 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7209 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7210
7211 /* set (14b only) or clear sideband credit */
7212 reg = read_csr(dd, SEND_CM_CTRL);
7213 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7214 write_csr(dd, SEND_CM_CTRL,
7215 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7216 } else {
7217 write_csr(dd, SEND_CM_CTRL,
7218 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7219 }
7220
7221 ppd->link_speed_active = 0; /* invalid value */
7222 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7223 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7224 switch (remote_tx_rate) {
7225 case 0:
7226 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7227 break;
7228 case 1:
7229 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7230 break;
7231 }
7232 } else {
7233 /* actual rate is highest bit of the ANDed rates */
7234 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7235
7236 if (rate & 2)
7237 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7238 else if (rate & 1)
7239 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7240 }
7241 if (ppd->link_speed_active == 0) {
7242 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7243 __func__, (int)remote_tx_rate);
7244 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7245 }
7246
7247 /*
7248 * Cache the values of the supported, enabled, and active
7249 * LTP CRC modes to return in 'portinfo' queries. But the bit
7250 * flags that are returned in the portinfo query differ from
7251 * what's in the link_crc_mask, crc_sizes, and crc_val
7252 * variables. Convert these here.
7253 */
7254 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7255 /* supported crc modes */
7256 ppd->port_ltp_crc_mode |=
7257 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7258 /* enabled crc modes */
7259 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7260 /* active crc mode */
7261
7262 /* set up the remote credit return table */
7263 assign_remote_cm_au_table(dd, vcu);
7264
7265 /*
7266 * The LCB is reset on entry to handle_verify_cap(), so this must
7267 * be applied on every link up.
7268 *
7269 * Adjust LCB error kill enable to kill the link if
7270 * these RBUF errors are seen:
7271 * REPLAY_BUF_MBE_SMASK
7272 * FLIT_INPUT_BUF_MBE_SMASK
7273 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007274 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007275 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7276 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7277 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7278 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7279 }
7280
7281 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7282 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7283
7284 /* give 8051 access to the LCB CSRs */
7285 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7286 set_8051_lcb_access(dd);
7287
7288 ppd->neighbor_guid =
7289 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7290 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7291 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7292 ppd->neighbor_type =
7293 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7294 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7295 ppd->neighbor_fm_security =
7296 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7297 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7298 dd_dev_info(dd,
7299 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7300 ppd->neighbor_guid, ppd->neighbor_type,
7301 ppd->mgmt_allowed, ppd->neighbor_fm_security);
7302 if (ppd->mgmt_allowed)
7303 add_full_mgmt_pkey(ppd);
7304
7305 /* tell the 8051 to go to LinkUp */
7306 set_link_state(ppd, HLS_GOING_UP);
7307}
7308
7309/*
7310 * Apply the link width downgrade enabled policy against the current active
7311 * link widths.
7312 *
7313 * Called when the enabled policy changes or the active link widths change.
7314 */
7315void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7316{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007317 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007318 int tries;
7319 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007320 u16 tx, rx;
7321
Dean Luick323fd782015-11-16 21:59:24 -05007322 /* use the hls lock to avoid a race with actual link up */
7323 tries = 0;
7324retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007325 mutex_lock(&ppd->hls_lock);
7326 /* only apply if the link is up */
Dean Luick323fd782015-11-16 21:59:24 -05007327 if (!(ppd->host_link_state & HLS_UP)) {
7328		/* still going up... wait and retry */
7329 if (ppd->host_link_state & HLS_GOING_UP) {
7330 if (++tries < 1000) {
7331 mutex_unlock(&ppd->hls_lock);
7332 usleep_range(100, 120); /* arbitrary */
7333 goto retry;
7334 }
7335 dd_dev_err(ppd->dd,
7336 "%s: giving up waiting for link state change\n",
7337 __func__);
7338 }
7339 goto done;
7340 }
7341
7342 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007343
7344 if (refresh_widths) {
7345 get_link_widths(ppd->dd, &tx, &rx);
7346 ppd->link_width_downgrade_tx_active = tx;
7347 ppd->link_width_downgrade_rx_active = rx;
7348 }
7349
7350 if (lwde == 0) {
7351 /* downgrade is disabled */
7352
7353 /* bounce if not at starting active width */
7354 if ((ppd->link_width_active !=
7355 ppd->link_width_downgrade_tx_active)
7356 || (ppd->link_width_active !=
7357 ppd->link_width_downgrade_rx_active)) {
7358 dd_dev_err(ppd->dd,
7359 "Link downgrade is disabled and link has downgraded, downing link\n");
7360 dd_dev_err(ppd->dd,
7361 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7362 ppd->link_width_active,
7363 ppd->link_width_downgrade_tx_active,
7364 ppd->link_width_downgrade_rx_active);
7365 do_bounce = 1;
7366 }
7367 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
7368 || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7369 /* Tx or Rx is outside the enabled policy */
7370 dd_dev_err(ppd->dd,
7371 "Link is outside of downgrade allowed, downing link\n");
7372 dd_dev_err(ppd->dd,
7373 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7374 lwde,
7375 ppd->link_width_downgrade_tx_active,
7376 ppd->link_width_downgrade_rx_active);
7377 do_bounce = 1;
7378 }
7379
Dean Luick323fd782015-11-16 21:59:24 -05007380done:
7381 mutex_unlock(&ppd->hls_lock);
7382
Mike Marciniszyn77241052015-07-30 15:17:43 -04007383 if (do_bounce) {
7384 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7385 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7386 set_link_state(ppd, HLS_DN_OFFLINE);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08007387 tune_serdes(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007388 start_link(ppd);
7389 }
7390}
7391
7392/*
7393 * Handle a link downgrade interrupt from the 8051.
7394 *
7395 * This is a work-queue function outside of the interrupt.
7396 */
7397void handle_link_downgrade(struct work_struct *work)
7398{
7399 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7400 link_downgrade_work);
7401
7402 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7403 apply_link_downgrade_policy(ppd, 1);
7404}
7405
7406static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7407{
7408 return flag_string(buf, buf_len, flags, dcc_err_flags,
7409 ARRAY_SIZE(dcc_err_flags));
7410}
7411
7412static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7413{
7414 return flag_string(buf, buf_len, flags, lcb_err_flags,
7415 ARRAY_SIZE(lcb_err_flags));
7416}
7417
7418static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7419{
7420 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7421 ARRAY_SIZE(dc8051_err_flags));
7422}
7423
7424static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7425{
7426 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7427 ARRAY_SIZE(dc8051_info_err_flags));
7428}
7429
7430static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7431{
7432 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7433 ARRAY_SIZE(dc8051_info_host_msg_flags));
7434}
7435
7436static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7437{
7438 struct hfi1_pportdata *ppd = dd->pport;
7439 u64 info, err, host_msg;
7440 int queue_link_down = 0;
7441 char buf[96];
7442
7443 /* look at the flags */
7444 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7445 /* 8051 information set by firmware */
7446 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7447 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7448 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7449 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7450 host_msg = (info >>
7451 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7452 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7453
7454 /*
7455 * Handle error flags.
7456 */
7457 if (err & FAILED_LNI) {
7458 /*
7459 * LNI error indications are cleared by the 8051
7460 * only when starting polling. Only pay attention
7461 * to them when in the states that occur during
7462 * LNI.
7463 */
7464 if (ppd->host_link_state
7465 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7466 queue_link_down = 1;
7467 dd_dev_info(dd, "Link error: %s\n",
7468 dc8051_info_err_string(buf,
7469 sizeof(buf),
7470 err & FAILED_LNI));
7471 }
7472 err &= ~(u64)FAILED_LNI;
7473 }
Dean Luick6d014532015-12-01 15:38:23 -05007474	/* unknown frames can happen during LNI, just count */
7475 if (err & UNKNOWN_FRAME) {
7476 ppd->unknown_frame_count++;
7477 err &= ~(u64)UNKNOWN_FRAME;
7478 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007479 if (err) {
7480 /* report remaining errors, but do not do anything */
7481 dd_dev_err(dd, "8051 info error: %s\n",
7482 dc8051_info_err_string(buf, sizeof(buf), err));
7483 }
7484
7485 /*
7486 * Handle host message flags.
7487 */
7488 if (host_msg & HOST_REQ_DONE) {
7489 /*
7490 * Presently, the driver does a busy wait for
7491 * host requests to complete. This is only an
7492 * informational message.
7493 * NOTE: The 8051 clears the host message
7494 * information *on the next 8051 command*.
7495 * Therefore, when linkup is achieved,
7496 * this flag will still be set.
7497 */
7498 host_msg &= ~(u64)HOST_REQ_DONE;
7499 }
7500 if (host_msg & BC_SMA_MSG) {
7501 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7502 host_msg &= ~(u64)BC_SMA_MSG;
7503 }
7504 if (host_msg & LINKUP_ACHIEVED) {
7505 dd_dev_info(dd, "8051: Link up\n");
7506 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7507 host_msg &= ~(u64)LINKUP_ACHIEVED;
7508 }
7509 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharancbac3862016-02-03 14:31:31 -08007510 queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007511 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7512 }
7513 if (host_msg & VERIFY_CAP_FRAME) {
7514 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7515 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7516 }
7517 if (host_msg & LINK_GOING_DOWN) {
7518 const char *extra = "";
7519 /* no downgrade action needed if going down */
7520 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7521 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7522 extra = " (ignoring downgrade)";
7523 }
7524 dd_dev_info(dd, "8051: Link down%s\n", extra);
7525 queue_link_down = 1;
7526 host_msg &= ~(u64)LINK_GOING_DOWN;
7527 }
7528 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7529 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7530 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7531 }
7532 if (host_msg) {
7533 /* report remaining messages, but do not do anything */
7534 dd_dev_info(dd, "8051 info host message: %s\n",
7535 dc8051_info_host_msg_string(buf, sizeof(buf),
7536 host_msg));
7537 }
7538
7539 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7540 }
7541 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7542 /*
7543 * Lost the 8051 heartbeat. If this happens, we
7544 * receive constant interrupts about it. Disable
7545 * the interrupt after the first.
7546 */
7547 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7548 write_csr(dd, DC_DC8051_ERR_EN,
7549 read_csr(dd, DC_DC8051_ERR_EN)
7550 & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7551
7552 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7553 }
7554 if (reg) {
7555 /* report the error, but do not do anything */
7556 dd_dev_err(dd, "8051 error: %s\n",
7557 dc8051_err_string(buf, sizeof(buf), reg));
7558 }
7559
7560 if (queue_link_down) {
7561 /* if the link is already going down or disabled, do not
7562 * queue another */
7563 if ((ppd->host_link_state
7564 & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
7565 || ppd->link_enabled == 0) {
7566 dd_dev_info(dd, "%s: not queuing link down\n",
7567 __func__);
7568 } else {
7569 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7570 }
7571 }
7572}
7573
7574static const char * const fm_config_txt[] = {
7575[0] =
7576 "BadHeadDist: Distance violation between two head flits",
7577[1] =
7578 "BadTailDist: Distance violation between two tail flits",
7579[2] =
7580 "BadCtrlDist: Distance violation between two credit control flits",
7581[3] =
7582 "BadCrdAck: Credits return for unsupported VL",
7583[4] =
7584 "UnsupportedVLMarker: Received VL Marker",
7585[5] =
7586 "BadPreempt: Exceeded the preemption nesting level",
7587[6] =
7588 "BadControlFlit: Received unsupported control flit",
7589/* no 7 */
7590[8] =
7591 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7592};
7593
7594static const char * const port_rcv_txt[] = {
7595[1] =
7596 "BadPktLen: Illegal PktLen",
7597[2] =
7598 "PktLenTooLong: Packet longer than PktLen",
7599[3] =
7600 "PktLenTooShort: Packet shorter than PktLen",
7601[4] =
7602 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7603[5] =
7604 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7605[6] =
7606 "BadL2: Illegal L2 opcode",
7607[7] =
7608 "BadSC: Unsupported SC",
7609[9] =
7610 "BadRC: Illegal RC",
7611[11] =
7612 "PreemptError: Preempting with same VL",
7613[12] =
7614 "PreemptVL15: Preempting a VL15 packet",
7615};
7616
7617#define OPA_LDR_FMCONFIG_OFFSET 16
7618#define OPA_LDR_PORTRCV_OFFSET 0
7619static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7620{
7621 u64 info, hdr0, hdr1;
7622 const char *extra;
7623 char buf[96];
7624 struct hfi1_pportdata *ppd = dd->pport;
7625 u8 lcl_reason = 0;
7626 int do_bounce = 0;
7627
7628 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7629 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7630 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7631 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7632 /* set status bit */
7633 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7634 }
7635 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7636 }
7637
7638 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7639 struct hfi1_pportdata *ppd = dd->pport;
7640 /* this counter saturates at (2^32) - 1 */
7641 if (ppd->link_downed < (u32)UINT_MAX)
7642 ppd->link_downed++;
7643 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7644 }
7645
7646 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7647 u8 reason_valid = 1;
7648
7649 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7650 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7651 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7652 /* set status bit */
7653 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7654 }
7655 switch (info) {
7656 case 0:
7657 case 1:
7658 case 2:
7659 case 3:
7660 case 4:
7661 case 5:
7662 case 6:
7663 extra = fm_config_txt[info];
7664 break;
7665 case 8:
7666 extra = fm_config_txt[info];
7667 if (ppd->port_error_action &
7668 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7669 do_bounce = 1;
7670 /*
7671 * lcl_reason cannot be derived from info
7672 * for this error
7673 */
7674 lcl_reason =
7675 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7676 }
7677 break;
7678 default:
7679 reason_valid = 0;
7680 snprintf(buf, sizeof(buf), "reserved%lld", info);
7681 extra = buf;
7682 break;
7683 }
7684
7685 if (reason_valid && !do_bounce) {
7686 do_bounce = ppd->port_error_action &
7687 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7688 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7689 }
7690
7691 /* just report this */
7692 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7693 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7694 }
7695
7696 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7697 u8 reason_valid = 1;
7698
7699 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7700 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7701 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7702 if (!(dd->err_info_rcvport.status_and_code &
7703 OPA_EI_STATUS_SMASK)) {
7704 dd->err_info_rcvport.status_and_code =
7705 info & OPA_EI_CODE_SMASK;
7706 /* set status bit */
7707 dd->err_info_rcvport.status_and_code |=
7708 OPA_EI_STATUS_SMASK;
7709 /* save first 2 flits in the packet that caused
7710 * the error */
7711 dd->err_info_rcvport.packet_flit1 = hdr0;
7712 dd->err_info_rcvport.packet_flit2 = hdr1;
7713 }
7714 switch (info) {
7715 case 1:
7716 case 2:
7717 case 3:
7718 case 4:
7719 case 5:
7720 case 6:
7721 case 7:
7722 case 9:
7723 case 11:
7724 case 12:
7725 extra = port_rcv_txt[info];
7726 break;
7727 default:
7728 reason_valid = 0;
7729 snprintf(buf, sizeof(buf), "reserved%lld", info);
7730 extra = buf;
7731 break;
7732 }
7733
7734 if (reason_valid && !do_bounce) {
7735 do_bounce = ppd->port_error_action &
7736 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7737 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7738 }
7739
7740 /* just report this */
7741 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7742 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
7743 hdr0, hdr1);
7744
7745 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7746 }
7747
7748 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7749 /* informative only */
7750 dd_dev_info(dd, "8051 access to LCB blocked\n");
7751 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7752 }
7753 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7754 /* informative only */
7755 dd_dev_info(dd, "host access to LCB blocked\n");
7756 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7757 }
7758
7759 /* report any remaining errors */
7760 if (reg)
7761 dd_dev_info(dd, "DCC Error: %s\n",
7762 dcc_err_string(buf, sizeof(buf), reg));
7763
7764 if (lcl_reason == 0)
7765 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7766
7767 if (do_bounce) {
7768 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7769 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7770 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7771 }
7772}
7773
7774static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7775{
7776 char buf[96];
7777
7778 dd_dev_info(dd, "LCB Error: %s\n",
7779 lcb_err_string(buf, sizeof(buf), reg));
7780}
7781
7782/*
7783 * CCE block DC interrupt. Source is < 8.
7784 */
7785static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7786{
7787 const struct err_reg_info *eri = &dc_errs[source];
7788
7789 if (eri->handler) {
7790 interrupt_clear_down(dd, 0, eri);
7791 } else if (source == 3 /* dc_lbm_int */) {
7792 /*
7793 * This indicates that a parity error has occurred on the
7794 * address/control lines presented to the LBM. The error
7795 * is a single pulse, there is no associated error flag,
7796 * and it is non-maskable. This is because if a parity
7797	 * error occurs on the request, the request is dropped.
7798 * This should never occur, but it is nice to know if it
7799 * ever does.
7800 */
7801 dd_dev_err(dd, "Parity error in DC LBM block\n");
7802 } else {
7803 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7804 }
7805}
7806
7807/*
7808 * TX block send credit interrupt. Source is < 160.
7809 */
7810static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7811{
7812 sc_group_release_update(dd, source);
7813}
7814
7815/*
7816 * TX block SDMA interrupt. Source is < 48.
7817 *
7818 * SDMA interrupts are grouped by type:
7819 *
7820 * 0 - N-1 = SDma
7821 * N - 2N-1 = SDmaProgress
7822 * 2N - 3N-1 = SDmaIdle
7823 */
7824static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7825{
7826 /* what interrupt */
7827 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7828 /* which engine */
7829 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7830
7831#ifdef CONFIG_SDMA_VERBOSITY
7832 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7833 slashstrip(__FILE__), __LINE__, __func__);
7834 sdma_dumpstate(&dd->per_sdma[which]);
7835#endif
7836
7837 if (likely(what < 3 && which < dd->num_sdma)) {
7838 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7839 } else {
7840 /* should not happen */
7841 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7842 }
7843}
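/*
 * Example (assuming TXE_NUM_SDMA_ENGINES is 16): source 20 decodes to
 * what = 1 (SDmaProgress) on engine which = 4, and the engine handler
 * is passed the single status bit 1ull << 20.
 */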
7844
7845/*
7846 * RX block receive available interrupt. Source is < 160.
7847 */
7848static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7849{
7850 struct hfi1_ctxtdata *rcd;
7851 char *err_detail;
7852
7853 if (likely(source < dd->num_rcv_contexts)) {
7854 rcd = dd->rcd[source];
7855 if (rcd) {
7856 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007857 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007858 else
7859 handle_user_interrupt(rcd);
7860 return; /* OK */
7861 }
7862 /* received an interrupt, but no rcd */
7863 err_detail = "dataless";
7864 } else {
7865 /* received an interrupt, but are not using that context */
7866 err_detail = "out of range";
7867 }
7868 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7869 err_detail, source);
7870}
7871
7872/*
7873 * RX block receive urgent interrupt. Source is < 160.
7874 */
7875static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7876{
7877 struct hfi1_ctxtdata *rcd;
7878 char *err_detail;
7879
7880 if (likely(source < dd->num_rcv_contexts)) {
7881 rcd = dd->rcd[source];
7882 if (rcd) {
7883 /* only pay attention to user urgent interrupts */
7884 if (source >= dd->first_user_ctxt)
7885 handle_user_interrupt(rcd);
7886 return; /* OK */
7887 }
7888 /* received an interrupt, but no rcd */
7889 err_detail = "dataless";
7890 } else {
7891 /* received an interrupt, but are not using that context */
7892 err_detail = "out of range";
7893 }
7894 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7895 err_detail, source);
7896}
7897
7898/*
7899 * Reserved range interrupt. Should not be called in normal operation.
7900 */
7901static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7902{
7903 char name[64];
7904
7905 dd_dev_err(dd, "unexpected %s interrupt\n",
7906 is_reserved_name(name, sizeof(name), source));
7907}
7908
7909static const struct is_table is_table[] = {
7910/* start end
7911 name func interrupt func */
7912{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
7913 is_misc_err_name, is_misc_err_int },
7914{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
7915 is_sdma_eng_err_name, is_sdma_eng_err_int },
7916{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7917 is_sendctxt_err_name, is_sendctxt_err_int },
7918{ IS_SDMA_START, IS_SDMA_END,
7919 is_sdma_eng_name, is_sdma_eng_int },
7920{ IS_VARIOUS_START, IS_VARIOUS_END,
7921 is_various_name, is_various_int },
7922{ IS_DC_START, IS_DC_END,
7923 is_dc_name, is_dc_int },
7924{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
7925 is_rcv_avail_name, is_rcv_avail_int },
7926{ IS_RCVURGENT_START, IS_RCVURGENT_END,
7927 is_rcv_urgent_name, is_rcv_urgent_int },
7928{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
7929 is_send_credit_name, is_send_credit_int},
7930{ IS_RESERVED_START, IS_RESERVED_END,
7931 is_reserved_name, is_reserved_int},
7932};
7933
7934/*
7935 * Interrupt source interrupt - called when the given source has an interrupt.
7936 * Source is a bit index into an array of 64-bit integers.
7937 */
7938static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7939{
7940 const struct is_table *entry;
7941
7942 /* avoids a double compare by walking the table in-order */
7943 for (entry = &is_table[0]; entry->is_name; entry++) {
7944 if (source < entry->end) {
7945 trace_hfi1_interrupt(dd, entry, source);
7946 entry->is_int(dd, source - entry->start);
7947 return;
7948 }
7949 }
7950 /* fell off the end */
7951 dd_dev_err(dd, "invalid interrupt source %u\n", source);
7952}
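/*
 * Dispatch example (illustrative): a source in the range
 * [IS_DC_START, IS_DC_END) matches the IS_DC entry above and is handed
 * to is_dc_int(dd, source - IS_DC_START).
 */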
7953
7954/*
7955 * General interrupt handler. This is able to correctly handle
7956 * all interrupts in case INTx is used.
7957 */
7958static irqreturn_t general_interrupt(int irq, void *data)
7959{
7960 struct hfi1_devdata *dd = data;
7961 u64 regs[CCE_NUM_INT_CSRS];
7962 u32 bit;
7963 int i;
7964
7965 this_cpu_inc(*dd->int_counter);
7966
7967 /* phase 1: scan and clear all handled interrupts */
7968 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
7969 if (dd->gi_mask[i] == 0) {
7970 regs[i] = 0; /* used later */
7971 continue;
7972 }
7973 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
7974 dd->gi_mask[i];
7975 /* only clear if anything is set */
7976 if (regs[i])
7977 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
7978 }
7979
7980 /* phase 2: call the appropriate handler */
7981 for_each_set_bit(bit, (unsigned long *)&regs[0],
7982 CCE_NUM_INT_CSRS*64) {
7983 is_interrupt(dd, bit);
7984 }
7985
7986 return IRQ_HANDLED;
7987}
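/*
 * Sketch of the phase 2 walk above (nothing assumed beyond the code):
 * regs[] is treated as one bitmap of CCE_NUM_INT_CSRS * 64 bits, so a
 * set bit N corresponds to interrupt status CSR N / 64, bit N % 64, and
 * is dispatched as is_interrupt(dd, N).
 */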
7988
7989static irqreturn_t sdma_interrupt(int irq, void *data)
7990{
7991 struct sdma_engine *sde = data;
7992 struct hfi1_devdata *dd = sde->dd;
7993 u64 status;
7994
7995#ifdef CONFIG_SDMA_VERBOSITY
7996 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
7997 slashstrip(__FILE__), __LINE__, __func__);
7998 sdma_dumpstate(sde);
7999#endif
8000
8001 this_cpu_inc(*dd->int_counter);
8002
8003 /* This read_csr is really bad in the hot path */
8004 status = read_csr(dd,
8005 CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
8006 & sde->imask;
8007 if (likely(status)) {
8008 /* clear the interrupt(s) */
8009 write_csr(dd,
8010 CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
8011 status);
8012
8013 /* handle the interrupt(s) */
8014 sdma_engine_interrupt(sde, status);
8015 } else
8016 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8017 sde->this_idx);
8018
8019 return IRQ_HANDLED;
8020}
8021
8022/*
Dean Luickf4f30031c2015-10-26 10:28:44 -04008023 * Clear the receive interrupt, forcing the write and making sure
8024 * we have data from the chip, pushing everything in front of it
8025 * back to the host.
8026 */
8027static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8028{
8029 struct hfi1_devdata *dd = rcd->dd;
8030 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8031
8032 mmiowb(); /* make sure everything before is written */
8033 write_csr(dd, addr, rcd->imask);
8034 /* force the above write on the chip and get a value back */
8035 (void)read_csr(dd, addr);
8036}
8037
8038/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008039void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008040{
8041 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8042}
8043
8044/* return non-zero if a packet is present */
8045static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8046{
8047 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8048 return (rcd->seq_cnt ==
8049 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8050
8051 /* else is RDMA rtail */
8052 return (rcd->head != get_rcvhdrtail(rcd));
8053}
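/*
 * Illustrative summary of the check above: without DMA_RTAIL the RHF at
 * the software head is inspected and a packet is present when its
 * sequence number equals the expected rcd->seq_cnt; with DMA_RTAIL the
 * software head is simply compared against the DMA'd tail index.
 */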
8054
8055/*
8056 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8057 * This routine will try to handle packets immediately (latency), but if
 8058 * it finds too many, it will invoke the thread handler (bandwidth). The
 8059 * chip receive interrupt is *not* cleared down until this or the thread (if
8060 * invoked) is finished. The intent is to avoid extra interrupts while we
8061 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008062 */
8063static irqreturn_t receive_context_interrupt(int irq, void *data)
8064{
8065 struct hfi1_ctxtdata *rcd = data;
8066 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008067 int disposition;
8068 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008069
8070 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8071 this_cpu_inc(*dd->int_counter);
8072
Dean Luickf4f30031c2015-10-26 10:28:44 -04008073 /* receive interrupt remains blocked while processing packets */
8074 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008075
Dean Luickf4f30031c2015-10-26 10:28:44 -04008076 /*
8077 * Too many packets were seen while processing packets in this
8078 * IRQ handler. Invoke the handler thread. The receive interrupt
8079 * remains blocked.
8080 */
8081 if (disposition == RCV_PKT_LIMIT)
8082 return IRQ_WAKE_THREAD;
8083
8084 /*
8085 * The packet processor detected no more packets. Clear the receive
 8086 * interrupt and recheck for a packet that may have arrived
8087 * after the previous check and interrupt clear. If a packet arrived,
8088 * force another interrupt.
8089 */
8090 clear_recv_intr(rcd);
8091 present = check_packet_present(rcd);
8092 if (present)
8093 force_recv_intr(rcd);
8094
8095 return IRQ_HANDLED;
8096}
8097
8098/*
8099 * Receive packet thread handler. This expects to be invoked with the
8100 * receive interrupt still blocked.
8101 */
8102static irqreturn_t receive_context_thread(int irq, void *data)
8103{
8104 struct hfi1_ctxtdata *rcd = data;
8105 int present;
8106
8107 /* receive interrupt is still blocked from the IRQ handler */
8108 (void)rcd->do_interrupt(rcd, 1);
8109
8110 /*
8111 * The packet processor will only return if it detected no more
8112 * packets. Hold IRQs here so we can safely clear the interrupt and
8113 * recheck for a packet that may have arrived after the previous
8114 * check and the interrupt clear. If a packet arrived, force another
8115 * interrupt.
8116 */
8117 local_irq_disable();
8118 clear_recv_intr(rcd);
8119 present = check_packet_present(rcd);
8120 if (present)
8121 force_recv_intr(rcd);
8122 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008123
8124 return IRQ_HANDLED;
8125}
8126
8127/* ========================================================================= */
8128
8129u32 read_physical_state(struct hfi1_devdata *dd)
8130{
8131 u64 reg;
8132
8133 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8134 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8135 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8136}
8137
Jim Snowfb9036d2016-01-11 18:32:21 -05008138u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008139{
8140 u64 reg;
8141
8142 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8143 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8144 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8145}
8146
8147static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8148{
8149 u64 reg;
8150
8151 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8152 /* clear current state, set new state */
8153 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8154 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8155 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8156}
8157
8158/*
 8159 * Use the 8051 to read an LCB CSR.
8160 */
8161static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8162{
8163 u32 regno;
8164 int ret;
8165
8166 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8167 if (acquire_lcb_access(dd, 0) == 0) {
8168 *data = read_csr(dd, addr);
8169 release_lcb_access(dd, 0);
8170 return 0;
8171 }
8172 return -EBUSY;
8173 }
8174
8175 /* register is an index of LCB registers: (offset - base) / 8 */
8176 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8177 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8178 if (ret != HCMD_SUCCESS)
8179 return -EBUSY;
8180 return 0;
8181}
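/*
 * Example of the index calculation above (illustrative only): LCB CSRs
 * are 8 bytes apart, so the 8051 register index is the offset from
 * DC_LCB_CFG_RUN divided by 8, e.g.:
 *
 *	addr  = DC_LCB_CFG_RUN + 0x50;
 *	regno = (addr - DC_LCB_CFG_RUN) >> 3;	 becomes 10
 */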
8182
8183/*
8184 * Read an LCB CSR. Access may not be in host control, so check.
8185 * Return 0 on success, -EBUSY on failure.
8186 */
8187int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8188{
8189 struct hfi1_pportdata *ppd = dd->pport;
8190
8191 /* if up, go through the 8051 for the value */
8192 if (ppd->host_link_state & HLS_UP)
8193 return read_lcb_via_8051(dd, addr, data);
8194 /* if going up or down, no access */
8195 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8196 return -EBUSY;
8197 /* otherwise, host has access */
8198 *data = read_csr(dd, addr);
8199 return 0;
8200}
8201
8202/*
8203 * Use the 8051 to write a LCB CSR.
8204 */
8205static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8206{
Dean Luick3bf40d62015-11-06 20:07:04 -05008207 u32 regno;
8208 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008209
Dean Luick3bf40d62015-11-06 20:07:04 -05008210 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8211 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8212 if (acquire_lcb_access(dd, 0) == 0) {
8213 write_csr(dd, addr, data);
8214 release_lcb_access(dd, 0);
8215 return 0;
8216 }
8217 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008218 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008219
8220 /* register is an index of LCB registers: (offset - base) / 8 */
8221 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8222 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8223 if (ret != HCMD_SUCCESS)
8224 return -EBUSY;
8225 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008226}
8227
8228/*
8229 * Write an LCB CSR. Access may not be in host control, so check.
8230 * Return 0 on success, -EBUSY on failure.
8231 */
8232int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8233{
8234 struct hfi1_pportdata *ppd = dd->pport;
8235
8236 /* if up, go through the 8051 for the value */
8237 if (ppd->host_link_state & HLS_UP)
8238 return write_lcb_via_8051(dd, addr, data);
8239 /* if going up or down, no access */
8240 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8241 return -EBUSY;
8242 /* otherwise, host has access */
8243 write_csr(dd, addr, data);
8244 return 0;
8245}
8246
8247/*
8248 * Returns:
8249 * < 0 = Linux error, not able to get access
8250 * > 0 = 8051 command RETURN_CODE
8251 */
8252static int do_8051_command(
8253 struct hfi1_devdata *dd,
8254 u32 type,
8255 u64 in_data,
8256 u64 *out_data)
8257{
8258 u64 reg, completed;
8259 int return_code;
8260 unsigned long flags;
8261 unsigned long timeout;
8262
8263 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8264
8265 /*
8266 * Alternative to holding the lock for a long time:
8267 * - keep busy wait - have other users bounce off
8268 */
8269 spin_lock_irqsave(&dd->dc8051_lock, flags);
8270
8271 /* We can't send any commands to the 8051 if it's in reset */
8272 if (dd->dc_shutdown) {
8273 return_code = -ENODEV;
8274 goto fail;
8275 }
8276
8277 /*
8278 * If an 8051 host command timed out previously, then the 8051 is
8279 * stuck.
8280 *
8281 * On first timeout, attempt to reset and restart the entire DC
8282 * block (including 8051). (Is this too big of a hammer?)
8283 *
8284 * If the 8051 times out a second time, the reset did not bring it
8285 * back to healthy life. In that case, fail any subsequent commands.
8286 */
8287 if (dd->dc8051_timed_out) {
8288 if (dd->dc8051_timed_out > 1) {
8289 dd_dev_err(dd,
8290 "Previous 8051 host command timed out, skipping command %u\n",
8291 type);
8292 return_code = -ENXIO;
8293 goto fail;
8294 }
8295 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8296 dc_shutdown(dd);
8297 dc_start(dd);
8298 spin_lock_irqsave(&dd->dc8051_lock, flags);
8299 }
8300
8301 /*
8302 * If there is no timeout, then the 8051 command interface is
8303 * waiting for a command.
8304 */
8305
8306 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008307 * When writing an LCB CSR, out_data contains the full value
 8308 * to be written, while in_data contains the relative LCB
 8309 * address in 7:0. Do the work here, rather than in the caller,
 8310 * of distributing the write data to where it needs to go:
8311 *
8312 * Write data
8313 * 39:00 -> in_data[47:8]
8314 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8315 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8316 */
8317 if (type == HCMD_WRITE_LCB_CSR) {
8318 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8319 reg = ((((*out_data) >> 40) & 0xff) <<
8320 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8321 | ((((*out_data) >> 48) & 0xffff) <<
8322 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8323 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8324 }
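	/*
	 * Worked example of the split above (illustrative, not executed):
	 * writing 0x0123456789abcdef to LCB register index 0x12 yields
	 *
	 *	in_data[7:0]  = 0x12		LCB register index
	 *	in_data[47:8] = 0x6789abcdef	write data bits 39:0
	 *	RETURN_CODE   = 0x45		write data bits 47:40
	 *	RSP_DATA      = 0x0123		write data bits 63:48
	 */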
8325
8326 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008327 * Do two writes: the first to stabilize the type and req_data, the
8328 * second to activate.
8329 */
8330 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8331 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8332 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8333 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8334 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8335 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8336 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8337
8338 /* wait for completion, alternate: interrupt */
8339 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8340 while (1) {
8341 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8342 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8343 if (completed)
8344 break;
8345 if (time_after(jiffies, timeout)) {
8346 dd->dc8051_timed_out++;
8347 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8348 if (out_data)
8349 *out_data = 0;
8350 return_code = -ETIMEDOUT;
8351 goto fail;
8352 }
8353 udelay(2);
8354 }
8355
8356 if (out_data) {
8357 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8358 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8359 if (type == HCMD_READ_LCB_CSR) {
8360 /* top 16 bits are in a different register */
8361 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8362 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8363 << (48
8364 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8365 }
8366 }
8367 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8368 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8369 dd->dc8051_timed_out = 0;
8370 /*
8371 * Clear command for next user.
8372 */
8373 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8374
8375fail:
8376 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8377
8378 return return_code;
8379}
8380
8381static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8382{
8383 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8384}
8385
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008386int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8387 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008388{
8389 u64 data;
8390 int ret;
8391
8392 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8393 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8394 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8395 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8396 if (ret != HCMD_SUCCESS) {
8397 dd_dev_err(dd,
8398 "load 8051 config: field id %d, lane %d, err %d\n",
8399 (int)field_id, (int)lane_id, ret);
8400 }
8401 return ret;
8402}
8403
8404/*
8405 * Read the 8051 firmware "registers". Use the RAM directly. Always
8406 * set the result, even on error.
8407 * Return 0 on success, -errno on failure
8408 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008409int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8410 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008411{
8412 u64 big_data;
8413 u32 addr;
8414 int ret;
8415
8416 /* address start depends on the lane_id */
8417 if (lane_id < 4)
8418 addr = (4 * NUM_GENERAL_FIELDS)
8419 + (lane_id * 4 * NUM_LANE_FIELDS);
8420 else
8421 addr = 0;
8422 addr += field_id * 4;
8423
8424 /* read is in 8-byte chunks, hardware will truncate the address down */
8425 ret = read_8051_data(dd, addr, 8, &big_data);
8426
8427 if (ret == 0) {
8428 /* extract the 4 bytes we want */
8429 if (addr & 0x4)
8430 *result = (u32)(big_data >> 32);
8431 else
8432 *result = (u32)big_data;
8433 } else {
8434 *result = 0;
8435 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8436 __func__, lane_id, field_id);
8437 }
8438
8439 return ret;
8440}
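/*
 * Example of the 8051 RAM addressing above (illustrative only): general
 * fields sit at the start of the RAM, per-lane fields follow, and each
 * field is 4 bytes wide, e.g.:
 *
 *	GENERAL_CONFIG, field 5:  addr = 5 * 4
 *	lane 2, field 3:          addr = 4 * NUM_GENERAL_FIELDS
 *	                                 + 2 * 4 * NUM_LANE_FIELDS + 3 * 4
 *
 * The actual read is done in 8-byte chunks; addr bit 2 selects which
 * half of the chunk holds the requested 4-byte field.
 */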
8441
8442static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8443 u8 continuous)
8444{
8445 u32 frame;
8446
8447 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8448 | power_management << POWER_MANAGEMENT_SHIFT;
8449 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8450 GENERAL_CONFIG, frame);
8451}
8452
8453static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8454 u16 vl15buf, u8 crc_sizes)
8455{
8456 u32 frame;
8457
8458 frame = (u32)vau << VAU_SHIFT
8459 | (u32)z << Z_SHIFT
8460 | (u32)vcu << VCU_SHIFT
8461 | (u32)vl15buf << VL15BUF_SHIFT
8462 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8463 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8464 GENERAL_CONFIG, frame);
8465}
8466
8467static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8468 u8 *flag_bits, u16 *link_widths)
8469{
8470 u32 frame;
8471
8472 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8473 &frame);
8474 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8475 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8476 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8477}
8478
8479static int write_vc_local_link_width(struct hfi1_devdata *dd,
8480 u8 misc_bits,
8481 u8 flag_bits,
8482 u16 link_widths)
8483{
8484 u32 frame;
8485
8486 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8487 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8488 | (u32)link_widths << LINK_WIDTH_SHIFT;
8489 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8490 frame);
8491}
8492
8493static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8494 u8 device_rev)
8495{
8496 u32 frame;
8497
8498 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8499 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8500 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8501}
8502
8503static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8504 u8 *device_rev)
8505{
8506 u32 frame;
8507
8508 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8509 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8510 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8511 & REMOTE_DEVICE_REV_MASK;
8512}
8513
8514void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8515{
8516 u32 frame;
8517
8518 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8519 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8520 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8521}
8522
8523static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8524 u8 *continuous)
8525{
8526 u32 frame;
8527
8528 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8529 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8530 & POWER_MANAGEMENT_MASK;
8531 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8532 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8533}
8534
8535static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8536 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8537{
8538 u32 frame;
8539
8540 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8541 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8542 *z = (frame >> Z_SHIFT) & Z_MASK;
8543 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8544 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8545 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8546}
8547
8548static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8549 u8 *remote_tx_rate,
8550 u16 *link_widths)
8551{
8552 u32 frame;
8553
8554 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8555 &frame);
8556 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8557 & REMOTE_TX_RATE_MASK;
8558 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8559}
8560
8561static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8562{
8563 u32 frame;
8564
8565 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8566 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8567}
8568
8569static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8570{
8571 u32 frame;
8572
8573 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8574 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8575}
8576
8577static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8578{
8579 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8580}
8581
8582static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8583{
8584 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8585}
8586
8587void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8588{
8589 u32 frame;
8590 int ret;
8591
8592 *link_quality = 0;
8593 if (dd->pport->host_link_state & HLS_UP) {
8594 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8595 &frame);
8596 if (ret == 0)
8597 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8598 & LINK_QUALITY_MASK;
8599 }
8600}
8601
8602static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8603{
8604 u32 frame;
8605
8606 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8607 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8608}
8609
8610static int read_tx_settings(struct hfi1_devdata *dd,
8611 u8 *enable_lane_tx,
8612 u8 *tx_polarity_inversion,
8613 u8 *rx_polarity_inversion,
8614 u8 *max_rate)
8615{
8616 u32 frame;
8617 int ret;
8618
8619 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8620 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8621 & ENABLE_LANE_TX_MASK;
8622 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8623 & TX_POLARITY_INVERSION_MASK;
8624 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8625 & RX_POLARITY_INVERSION_MASK;
8626 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8627 return ret;
8628}
8629
8630static int write_tx_settings(struct hfi1_devdata *dd,
8631 u8 enable_lane_tx,
8632 u8 tx_polarity_inversion,
8633 u8 rx_polarity_inversion,
8634 u8 max_rate)
8635{
8636 u32 frame;
8637
8638 /* no need to mask, all variable sizes match field widths */
8639 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8640 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8641 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8642 | max_rate << MAX_RATE_SHIFT;
8643 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8644}
8645
8646static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8647{
8648 u32 frame, version, prod_id;
8649 int ret, lane;
8650
8651 /* 4 lanes */
8652 for (lane = 0; lane < 4; lane++) {
8653 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8654 if (ret) {
8655 dd_dev_err(
8656 dd,
8657 "Unable to read lane %d firmware details\n",
8658 lane);
8659 continue;
8660 }
8661 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8662 & SPICO_ROM_VERSION_MASK;
8663 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8664 & SPICO_ROM_PROD_ID_MASK;
8665 dd_dev_info(dd,
8666 "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8667 lane, version, prod_id);
8668 }
8669}
8670
8671/*
8672 * Read an idle LCB message.
8673 *
8674 * Returns 0 on success, -EINVAL on error
8675 */
8676static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8677{
8678 int ret;
8679
8680 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
8681 type, data_out);
8682 if (ret != HCMD_SUCCESS) {
8683 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8684 (u32)type, ret);
8685 return -EINVAL;
8686 }
8687 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8688 /* return only the payload as we already know the type */
8689 *data_out >>= IDLE_PAYLOAD_SHIFT;
8690 return 0;
8691}
8692
8693/*
8694 * Read an idle SMA message. To be done in response to a notification from
8695 * the 8051.
8696 *
8697 * Returns 0 on success, -EINVAL on error
8698 */
8699static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8700{
8701 return read_idle_message(dd,
8702 (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
8703}
8704
8705/*
8706 * Send an idle LCB message.
8707 *
8708 * Returns 0 on success, -EINVAL on error
8709 */
8710static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8711{
8712 int ret;
8713
8714 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8715 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8716 if (ret != HCMD_SUCCESS) {
8717 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8718 data, ret);
8719 return -EINVAL;
8720 }
8721 return 0;
8722}
8723
8724/*
8725 * Send an idle SMA message.
8726 *
8727 * Returns 0 on success, -EINVAL on error
8728 */
8729int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8730{
8731 u64 data;
8732
8733 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
8734 | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8735 return send_idle_message(dd, data);
8736}
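/*
 * For reference (illustrative restatement of the code above): an idle
 * SMA message is encoded as
 *
 *	data = (message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT
 *	       | (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT;
 *
 * so the type field marks it as SMA and the payload sits above
 * IDLE_PAYLOAD_SHIFT; read_idle_sma() strips the type and returns only
 * the payload.
 */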
8737
8738/*
8739 * Initialize the LCB then do a quick link up. This may or may not be
8740 * in loopback.
8741 *
8742 * return 0 on success, -errno on error
8743 */
8744static int do_quick_linkup(struct hfi1_devdata *dd)
8745{
8746 u64 reg;
8747 unsigned long timeout;
8748 int ret;
8749
8750 lcb_shutdown(dd, 0);
8751
8752 if (loopback) {
8753 /* LCB_CFG_LOOPBACK.VAL = 2 */
8754 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8755 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8756 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8757 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8758 }
8759
8760 /* start the LCBs */
8761 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8762 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8763
8764 /* simulator only loopback steps */
8765 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8766 /* LCB_CFG_RUN.EN = 1 */
8767 write_csr(dd, DC_LCB_CFG_RUN,
8768 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8769
8770 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8771 timeout = jiffies + msecs_to_jiffies(10);
8772 while (1) {
8773 reg = read_csr(dd,
8774 DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8775 if (reg)
8776 break;
8777 if (time_after(jiffies, timeout)) {
8778 dd_dev_err(dd,
8779 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8780 return -ETIMEDOUT;
8781 }
8782 udelay(2);
8783 }
8784
8785 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8786 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8787 }
8788
8789 if (!loopback) {
8790 /*
8791 * When doing quick linkup and not in loopback, both
8792 * sides must be done with LCB set-up before either
8793 * starts the quick linkup. Put a delay here so that
8794 * both sides can be started and have a chance to be
8795 * done with LCB set up before resuming.
8796 */
8797 dd_dev_err(dd,
8798 "Pausing for peer to be finished with LCB set up\n");
8799 msleep(5000);
8800 dd_dev_err(dd,
8801 "Continuing with quick linkup\n");
8802 }
8803
8804 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8805 set_8051_lcb_access(dd);
8806
8807 /*
8808 * State "quick" LinkUp request sets the physical link state to
8809 * LinkUp without a verify capability sequence.
8810 * This state is in simulator v37 and later.
8811 */
8812 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8813 if (ret != HCMD_SUCCESS) {
8814 dd_dev_err(dd,
8815 "%s: set physical link state to quick LinkUp failed with return %d\n",
8816 __func__, ret);
8817
8818 set_host_lcb_access(dd);
8819 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8820
8821 if (ret >= 0)
8822 ret = -EINVAL;
8823 return ret;
8824 }
8825
8826 return 0; /* success */
8827}
8828
8829/*
8830 * Set the SerDes to internal loopback mode.
8831 * Returns 0 on success, -errno on error.
8832 */
8833static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8834{
8835 int ret;
8836
8837 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8838 if (ret == HCMD_SUCCESS)
8839 return 0;
8840 dd_dev_err(dd,
8841 "Set physical link state to SerDes Loopback failed with return %d\n",
8842 ret);
8843 if (ret >= 0)
8844 ret = -EINVAL;
8845 return ret;
8846}
8847
8848/*
8849 * Do all special steps to set up loopback.
8850 */
8851static int init_loopback(struct hfi1_devdata *dd)
8852{
8853 dd_dev_info(dd, "Entering loopback mode\n");
8854
8855 /* all loopbacks should disable self GUID check */
8856 write_csr(dd, DC_DC8051_CFG_MODE,
8857 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8858
8859 /*
8860 * The simulator has only one loopback option - LCB. Switch
8861 * to that option, which includes quick link up.
8862 *
8863 * Accept all valid loopback values.
8864 */
8865 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
8866 && (loopback == LOOPBACK_SERDES
8867 || loopback == LOOPBACK_LCB
8868 || loopback == LOOPBACK_CABLE)) {
8869 loopback = LOOPBACK_LCB;
8870 quick_linkup = 1;
8871 return 0;
8872 }
8873
8874 /* handle serdes loopback */
8875 if (loopback == LOOPBACK_SERDES) {
 8876 /* internal serdes loopback needs quick linkup on RTL */
8877 if (dd->icode == ICODE_RTL_SILICON)
8878 quick_linkup = 1;
8879 return set_serdes_loopback_mode(dd);
8880 }
8881
8882 /* LCB loopback - handled at poll time */
8883 if (loopback == LOOPBACK_LCB) {
8884 quick_linkup = 1; /* LCB is always quick linkup */
8885
8886 /* not supported in emulation due to emulation RTL changes */
8887 if (dd->icode == ICODE_FPGA_EMULATION) {
8888 dd_dev_err(dd,
8889 "LCB loopback not supported in emulation\n");
8890 return -EINVAL;
8891 }
8892 return 0;
8893 }
8894
8895 /* external cable loopback requires no extra steps */
8896 if (loopback == LOOPBACK_CABLE)
8897 return 0;
8898
8899 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8900 return -EINVAL;
8901}
8902
8903/*
8904 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8905 * used in the Verify Capability link width attribute.
8906 */
8907static u16 opa_to_vc_link_widths(u16 opa_widths)
8908{
8909 int i;
8910 u16 result = 0;
8911
8912 static const struct link_bits {
8913 u16 from;
8914 u16 to;
8915 } opa_link_xlate[] = {
8916 { OPA_LINK_WIDTH_1X, 1 << (1-1) },
8917 { OPA_LINK_WIDTH_2X, 1 << (2-1) },
8918 { OPA_LINK_WIDTH_3X, 1 << (3-1) },
8919 { OPA_LINK_WIDTH_4X, 1 << (4-1) },
8920 };
8921
8922 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8923 if (opa_widths & opa_link_xlate[i].from)
8924 result |= opa_link_xlate[i].to;
8925 }
8926 return result;
8927}
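/*
 * Example (illustrative only): the OPA width enables are a bit mask, so
 * a combination translates to the VC per-width bits directly, e.g.
 *
 *	opa_to_vc_link_widths(OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X)
 *		returns (1 << 0) | (1 << 3) = 0x9
 */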
8928
8929/*
8930 * Set link attributes before moving to polling.
8931 */
8932static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8933{
8934 struct hfi1_devdata *dd = ppd->dd;
8935 u8 enable_lane_tx;
8936 u8 tx_polarity_inversion;
8937 u8 rx_polarity_inversion;
8938 int ret;
8939
8940 /* reset our fabric serdes to clear any lingering problems */
8941 fabric_serdes_reset(dd);
8942
8943 /* set the local tx rate - need to read-modify-write */
8944 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8945 &rx_polarity_inversion, &ppd->local_tx_rate);
8946 if (ret)
8947 goto set_local_link_attributes_fail;
8948
8949 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
8950 /* set the tx rate to the fastest enabled */
8951 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8952 ppd->local_tx_rate = 1;
8953 else
8954 ppd->local_tx_rate = 0;
8955 } else {
8956 /* set the tx rate to all enabled */
8957 ppd->local_tx_rate = 0;
8958 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
8959 ppd->local_tx_rate |= 2;
8960 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
8961 ppd->local_tx_rate |= 1;
8962 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04008963
8964 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008965 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
8966 rx_polarity_inversion, ppd->local_tx_rate);
8967 if (ret != HCMD_SUCCESS)
8968 goto set_local_link_attributes_fail;
8969
8970 /*
8971 * DC supports continuous updates.
8972 */
8973 ret = write_vc_local_phy(dd, 0 /* no power management */,
8974 1 /* continuous updates */);
8975 if (ret != HCMD_SUCCESS)
8976 goto set_local_link_attributes_fail;
8977
8978 /* z=1 in the next call: AU of 0 is not supported by the hardware */
8979 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
8980 ppd->port_crc_mode_enabled);
8981 if (ret != HCMD_SUCCESS)
8982 goto set_local_link_attributes_fail;
8983
8984 ret = write_vc_local_link_width(dd, 0, 0,
8985 opa_to_vc_link_widths(ppd->link_width_enabled));
8986 if (ret != HCMD_SUCCESS)
8987 goto set_local_link_attributes_fail;
8988
8989 /* let peer know who we are */
8990 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
8991 if (ret == HCMD_SUCCESS)
8992 return 0;
8993
8994set_local_link_attributes_fail:
8995 dd_dev_err(dd,
8996 "Failed to set local link attributes, return 0x%x\n",
8997 ret);
8998 return ret;
8999}
9000
9001/*
9002 * Call this to start the link. Schedule a retry if the cable is not
9003 * present or if unable to start polling. Do not do anything if the
9004 * link is disabled. Returns 0 if link is disabled or moved to polling
9005 */
9006int start_link(struct hfi1_pportdata *ppd)
9007{
9008 if (!ppd->link_enabled) {
9009 dd_dev_info(ppd->dd,
9010 "%s: stopping link start because link is disabled\n",
9011 __func__);
9012 return 0;
9013 }
9014 if (!ppd->driver_link_ready) {
9015 dd_dev_info(ppd->dd,
9016 "%s: stopping link start because driver is not ready\n",
9017 __func__);
9018 return 0;
9019 }
9020
9021 if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9022 loopback == LOOPBACK_LCB ||
9023 ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9024 return set_link_state(ppd, HLS_DN_POLL);
9025
9026 dd_dev_info(ppd->dd,
9027 "%s: stopping link start because no cable is present\n",
9028 __func__);
9029 return -EAGAIN;
9030}
9031
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009032static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9033{
9034 struct hfi1_devdata *dd = ppd->dd;
9035 u64 mask;
9036 unsigned long timeout;
9037
9038 /*
9039 * Check for QSFP interrupt for t_init (SFF 8679)
9040 */
9041 timeout = jiffies + msecs_to_jiffies(2000);
9042 while (1) {
9043 mask = read_csr(dd, dd->hfi1_id ?
9044 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9045 if (!(mask & QSFP_HFI0_INT_N)) {
9046 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9047 ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9048 break;
9049 }
9050 if (time_after(jiffies, timeout)) {
9051 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9052 __func__);
9053 break;
9054 }
9055 udelay(2);
9056 }
9057}
9058
9059static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9060{
9061 struct hfi1_devdata *dd = ppd->dd;
9062 u64 mask;
9063
9064 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9065 if (enable)
9066 mask |= (u64)QSFP_HFI0_INT_N;
9067 else
9068 mask &= ~(u64)QSFP_HFI0_INT_N;
9069 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9070}
9071
9072void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009073{
9074 struct hfi1_devdata *dd = ppd->dd;
9075 u64 mask, qsfp_mask;
9076
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009077 /* Disable INT_N from triggering QSFP interrupts */
9078 set_qsfp_int_n(ppd, 0);
9079
9080 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009081 mask = (u64)QSFP_HFI0_RESET_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009082 qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009083 qsfp_mask |= mask;
9084 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009085 dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009086
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009087 qsfp_mask = read_csr(dd, dd->hfi1_id ?
9088 ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009089 qsfp_mask &= ~mask;
9090 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009091 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009092
9093 udelay(10);
9094
9095 qsfp_mask |= mask;
9096 write_csr(dd,
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009097 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9098
9099 wait_for_qsfp_init(ppd);
9100
9101 /*
9102 * Allow INT_N to trigger the QSFP interrupt to watch
9103 * for alarms and warnings
9104 */
9105 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009106}
9107
9108static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9109 u8 *qsfp_interrupt_status)
9110{
9111 struct hfi1_devdata *dd = ppd->dd;
9112
9113 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9114 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9115 dd_dev_info(dd,
9116 "%s: QSFP cable on fire\n",
9117 __func__);
9118
9119 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9120 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9121 dd_dev_info(dd,
9122 "%s: QSFP cable temperature too low\n",
9123 __func__);
9124
9125 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9126 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9127 dd_dev_info(dd,
9128 "%s: QSFP supply voltage too high\n",
9129 __func__);
9130
9131 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9132 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9133 dd_dev_info(dd,
9134 "%s: QSFP supply voltage too low\n",
9135 __func__);
9136
9137 /* Byte 2 is vendor specific */
9138
9139 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9140 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9141 dd_dev_info(dd,
9142 "%s: Cable RX channel 1/2 power too high\n",
9143 __func__);
9144
9145 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9146 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9147 dd_dev_info(dd,
9148 "%s: Cable RX channel 1/2 power too low\n",
9149 __func__);
9150
9151 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9152 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9153 dd_dev_info(dd,
9154 "%s: Cable RX channel 3/4 power too high\n",
9155 __func__);
9156
9157 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9158 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9159 dd_dev_info(dd,
9160 "%s: Cable RX channel 3/4 power too low\n",
9161 __func__);
9162
9163 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9164 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9165 dd_dev_info(dd,
9166 "%s: Cable TX channel 1/2 bias too high\n",
9167 __func__);
9168
9169 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9170 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9171 dd_dev_info(dd,
9172 "%s: Cable TX channel 1/2 bias too low\n",
9173 __func__);
9174
9175 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9176 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9177 dd_dev_info(dd,
9178 "%s: Cable TX channel 3/4 bias too high\n",
9179 __func__);
9180
9181 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9182 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9183 dd_dev_info(dd,
9184 "%s: Cable TX channel 3/4 bias too low\n",
9185 __func__);
9186
9187 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9188 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9189 dd_dev_info(dd,
9190 "%s: Cable TX channel 1/2 power too high\n",
9191 __func__);
9192
9193 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9194 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9195 dd_dev_info(dd,
9196 "%s: Cable TX channel 1/2 power too low\n",
9197 __func__);
9198
9199 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9200 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9201 dd_dev_info(dd,
9202 "%s: Cable TX channel 3/4 power too high\n",
9203 __func__);
9204
9205 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9206 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9207 dd_dev_info(dd,
9208 "%s: Cable TX channel 3/4 power too low\n",
9209 __func__);
9210
9211 /* Bytes 9-10 and 11-12 are reserved */
9212 /* Bytes 13-15 are vendor specific */
9213
9214 return 0;
9215}
9216
Mike Marciniszyn77241052015-07-30 15:17:43 -04009217/* This routine will only be scheduled if the QSFP module is present */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009218void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009219{
9220 struct qsfp_data *qd;
9221 struct hfi1_pportdata *ppd;
9222 struct hfi1_devdata *dd;
9223
9224 qd = container_of(work, struct qsfp_data, qsfp_work);
9225 ppd = qd->ppd;
9226 dd = ppd->dd;
9227
9228 /* Sanity check */
9229 if (!qsfp_mod_present(ppd))
9230 return;
9231
9232 /*
 9233 * Turn DC back on after the cable has been
9234 * re-inserted. Up until now, the DC has been in
9235 * reset to save power.
9236 */
9237 dc_start(dd);
9238
9239 if (qd->cache_refresh_required) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009240
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009241 set_qsfp_int_n(ppd, 0);
9242
9243 wait_for_qsfp_init(ppd);
9244
9245 /*
9246 * Allow INT_N to trigger the QSFP interrupt to watch
9247 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009248 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009249 set_qsfp_int_n(ppd, 1);
9250
9251 tune_serdes(ppd);
9252
9253 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009254 }
9255
9256 if (qd->check_interrupt_flags) {
9257 u8 qsfp_interrupt_status[16] = {0,};
9258
9259 if (qsfp_read(ppd, dd->hfi1_id, 6,
9260 &qsfp_interrupt_status[0], 16) != 16) {
9261 dd_dev_info(dd,
9262 "%s: Failed to read status of QSFP module\n",
9263 __func__);
9264 } else {
9265 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009266
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009267 handle_qsfp_error_conditions(
9268 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009269 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9270 ppd->qsfp_info.check_interrupt_flags = 0;
9271 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9272 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009273 }
9274 }
9275}
9276
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009277static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009278{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009279 struct hfi1_pportdata *ppd = dd->pport;
9280 u64 qsfp_mask, cce_int_mask;
9281 const int qsfp1_int_smask = QSFP1_INT % 64;
9282 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009283
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009284 /*
9285 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9286 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9287 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9288 * the index of the appropriate CSR in the CCEIntMask CSR array
9289 */
9290 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9291 (8 * (QSFP1_INT / 64)));
9292 if (dd->hfi1_id) {
9293 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9294 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9295 cce_int_mask);
9296 } else {
9297 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9298 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9299 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009300 }
9301
Mike Marciniszyn77241052015-07-30 15:17:43 -04009302 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9303 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009304 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9305 qsfp_mask);
9306 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9307 qsfp_mask);
9308
9309 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009310
9311 /* Handle active low nature of INT_N and MODPRST_N pins */
9312 if (qsfp_mod_present(ppd))
9313 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9314 write_csr(dd,
9315 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9316 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009317}
9318
Dean Luickbbdeb332015-12-01 15:38:15 -05009319/*
9320 * Do a one-time initialize of the LCB block.
9321 */
9322static void init_lcb(struct hfi1_devdata *dd)
9323{
Dean Luicka59329d2016-02-03 14:32:31 -08009324 /* simulator does not correctly handle LCB cclk loopback, skip */
9325 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9326 return;
9327
Dean Luickbbdeb332015-12-01 15:38:15 -05009328 /* the DC has been reset earlier in the driver load */
9329
9330 /* set LCB for cclk loopback on the port */
9331 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9332 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9333 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9334 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9335 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9336 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9337 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9338}
9339
Mike Marciniszyn77241052015-07-30 15:17:43 -04009340int bringup_serdes(struct hfi1_pportdata *ppd)
9341{
9342 struct hfi1_devdata *dd = ppd->dd;
9343 u64 guid;
9344 int ret;
9345
9346 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9347 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9348
9349 guid = ppd->guid;
9350 if (!guid) {
9351 if (dd->base_guid)
9352 guid = dd->base_guid + ppd->port - 1;
9353 ppd->guid = guid;
9354 }
9355
Mike Marciniszyn77241052015-07-30 15:17:43 -04009356 /* Set linkinit_reason on power up per OPA spec */
9357 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9358
Dean Luickbbdeb332015-12-01 15:38:15 -05009359 /* one-time init of the LCB */
9360 init_lcb(dd);
9361
Mike Marciniszyn77241052015-07-30 15:17:43 -04009362 if (loopback) {
9363 ret = init_loopback(dd);
9364 if (ret < 0)
9365 return ret;
9366 }
9367
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009368 /* tune the SerDes to a ballpark setting for
 9369 * optimal signal and bit error rate.
 9370 * This needs to be done before starting the link
9371 */
9372 tune_serdes(ppd);
9373
Mike Marciniszyn77241052015-07-30 15:17:43 -04009374 return start_link(ppd);
9375}
9376
9377void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9378{
9379 struct hfi1_devdata *dd = ppd->dd;
9380
9381 /*
 9382 * Shut down the link and keep it down. First clear the flag that
 9383 * lets the driver allow the link to come up (driver_link_ready).
9384 * Then make sure the link is not automatically restarted
9385 * (link_enabled). Cancel any pending restart. And finally
9386 * go offline.
9387 */
9388 ppd->driver_link_ready = 0;
9389 ppd->link_enabled = 0;
9390
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009391 ppd->offline_disabled_reason =
9392 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009393 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9394 OPA_LINKDOWN_REASON_SMA_DISABLED);
9395 set_link_state(ppd, HLS_DN_OFFLINE);
9396
9397 /* disable the port */
9398 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9399}
9400
9401static inline int init_cpu_counters(struct hfi1_devdata *dd)
9402{
9403 struct hfi1_pportdata *ppd;
9404 int i;
9405
9406 ppd = (struct hfi1_pportdata *)(dd + 1);
9407 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009408 ppd->ibport_data.rvp.rc_acks = NULL;
9409 ppd->ibport_data.rvp.rc_qacks = NULL;
9410 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9411 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9412 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9413 if (!ppd->ibport_data.rvp.rc_acks ||
9414 !ppd->ibport_data.rvp.rc_delayed_comp ||
9415 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009416 return -ENOMEM;
9417 }
9418
9419 return 0;
9420}
9421
9422static const char * const pt_names[] = {
9423 "expected",
9424 "eager",
9425 "invalid"
9426};
9427
9428static const char *pt_name(u32 type)
9429{
9430 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9431}
9432
9433/*
9434 * index is the index into the receive array
9435 */
9436void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9437 u32 type, unsigned long pa, u16 order)
9438{
9439 u64 reg;
9440 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9441 (dd->kregbase + RCV_ARRAY));
9442
9443 if (!(dd->flags & HFI1_PRESENT))
9444 goto done;
9445
9446 if (type == PT_INVALID) {
9447 pa = 0;
9448 } else if (type > PT_INVALID) {
9449 dd_dev_err(dd,
9450 "unexpected receive array type %u for index %u, not handled\n",
9451 type, index);
9452 goto done;
9453 }
9454
9455 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9456 pt_name(type), index, pa, (unsigned long)order);
9457
9458#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9459 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9460 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9461 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9462 << RCV_ARRAY_RT_ADDR_SHIFT;
9463 writeq(reg, base + (index * 8));
9464
9465 if (type == PT_EAGER)
9466 /*
9467 * Eager entries are written one-by-one so we have to push them
9468 * after we write the entry.
9469 */
9470 flush_wc();
9471done:
9472 return;
9473}
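/*
 * Layout of the RcvArray entry written above (taken from the code, for
 * reference only): the physical address is 4KB aligned and stored as a
 * page-style index alongside the buffer size order:
 *
 *	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 *	      | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
 *	      | ((pa >> 12) & RCV_ARRAY_RT_ADDR_MASK)
 *					<< RCV_ARRAY_RT_ADDR_SHIFT;
 */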
9474
9475void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9476{
9477 struct hfi1_devdata *dd = rcd->dd;
9478 u32 i;
9479
9480 /* this could be optimized */
9481 for (i = rcd->eager_base; i < rcd->eager_base +
9482 rcd->egrbufs.alloced; i++)
9483 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9484
9485 for (i = rcd->expected_base;
9486 i < rcd->expected_base + rcd->expected_count; i++)
9487 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9488}
9489
9490int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9491 struct hfi1_ctxt_info *kinfo)
9492{
9493 kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9494 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9495 return 0;
9496}
9497
9498struct hfi1_message_header *hfi1_get_msgheader(
9499 struct hfi1_devdata *dd, __le32 *rhf_addr)
9500{
9501 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9502
9503 return (struct hfi1_message_header *)
9504 (rhf_addr - dd->rhf_offset + offset);
9505}
9506
9507static const char * const ib_cfg_name_strings[] = {
9508 "HFI1_IB_CFG_LIDLMC",
9509 "HFI1_IB_CFG_LWID_DG_ENB",
9510 "HFI1_IB_CFG_LWID_ENB",
9511 "HFI1_IB_CFG_LWID",
9512 "HFI1_IB_CFG_SPD_ENB",
9513 "HFI1_IB_CFG_SPD",
9514 "HFI1_IB_CFG_RXPOL_ENB",
9515 "HFI1_IB_CFG_LREV_ENB",
9516 "HFI1_IB_CFG_LINKLATENCY",
9517 "HFI1_IB_CFG_HRTBT",
9518 "HFI1_IB_CFG_OP_VLS",
9519 "HFI1_IB_CFG_VL_HIGH_CAP",
9520 "HFI1_IB_CFG_VL_LOW_CAP",
9521 "HFI1_IB_CFG_OVERRUN_THRESH",
9522 "HFI1_IB_CFG_PHYERR_THRESH",
9523 "HFI1_IB_CFG_LINKDEFAULT",
9524 "HFI1_IB_CFG_PKEYS",
9525 "HFI1_IB_CFG_MTU",
9526 "HFI1_IB_CFG_LSTATE",
9527 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9528 "HFI1_IB_CFG_PMA_TICKS",
9529 "HFI1_IB_CFG_PORT"
9530};
9531
9532static const char *ib_cfg_name(int which)
9533{
9534 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9535 return "invalid";
9536 return ib_cfg_name_strings[which];
9537}
9538
9539int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9540{
9541 struct hfi1_devdata *dd = ppd->dd;
9542 int val = 0;
9543
9544 switch (which) {
9545 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9546 val = ppd->link_width_enabled;
9547 break;
9548 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9549 val = ppd->link_width_active;
9550 break;
9551 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9552 val = ppd->link_speed_enabled;
9553 break;
9554 case HFI1_IB_CFG_SPD: /* current Link speed */
9555 val = ppd->link_speed_active;
9556 break;
9557
9558 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9559 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9560 case HFI1_IB_CFG_LINKLATENCY:
9561 goto unimplemented;
9562
9563 case HFI1_IB_CFG_OP_VLS:
9564 val = ppd->vls_operational;
9565 break;
9566 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9567 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9568 break;
9569 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9570 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9571 break;
9572 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9573 val = ppd->overrun_threshold;
9574 break;
9575 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9576 val = ppd->phy_error_threshold;
9577 break;
9578 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9579 val = dd->link_default;
9580 break;
9581
9582 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9583 case HFI1_IB_CFG_PMA_TICKS:
9584 default:
9585unimplemented:
9586 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9587 dd_dev_info(
9588 dd,
9589 "%s: which %s: not implemented\n",
9590 __func__,
9591 ib_cfg_name(which));
9592 break;
9593 }
9594
9595 return val;
9596}
9597
9598/*
9599 * The largest MAD packet size.
9600 */
9601#define MAX_MAD_PACKET 2048
9602
9603/*
9604 * Return the maximum header bytes that can go on the _wire_
9605 * for this device. This count includes the ICRC which is
9606 * not part of the packet held in memory but it is appended
9607 * by the HW.
9608 * This is dependent on the device's receive header entry size.
9609 * HFI allows this to be set per-receive context, but the
9610 * driver presently enforces a global value.
9611 */
9612u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9613{
9614 /*
9615 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9616 * the Receive Header Entry Size minus the PBC (or RHF) size
9617 * plus one DW for the ICRC appended by HW.
9618 *
9619 * dd->rcd[0].rcvhdrqentsize is in DW.
9620 * We use rcd[0] as all context will have the same value. Also,
9621 * the first kernel context would have been allocated by now so
9622 * we are guaranteed a valid value.
9623 */
9624 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9625}
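/*
 * Worked example (illustrative only): with a receive header entry size
 * of 32 DW, the maximum on-the-wire header is
 *
 *	(32 - 2 + 1) << 2 = 124 bytes
 *
 * i.e. the entry minus the PBC/RHF DWs plus one DW for the HW-appended
 * ICRC, converted from DWs to bytes.
 */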
9626
9627/*
9628 * Set Send Length
9629 * @ppd - per port data
9630 *
9631 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9632 * registers compare against LRH.PktLen, so use the max bytes included
9633 * in the LRH.
9634 *
9635 * This routine changes all VL values except VL15, which it maintains at
9636 * the same value.
9637 */
9638static void set_send_length(struct hfi1_pportdata *ppd)
9639{
9640 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009641 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9642 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009643 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9644 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9645 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9646 int i;
9647
9648 for (i = 0; i < ppd->vls_supported; i++) {
9649 if (dd->vld[i].mtu > maxvlmtu)
9650 maxvlmtu = dd->vld[i].mtu;
9651 if (i <= 3)
9652 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9653 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9654 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9655 else
9656 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9657 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9658 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9659 }
9660 write_csr(dd, SEND_LEN_CHECK0, len1);
9661 write_csr(dd, SEND_LEN_CHECK1, len2);
9662 /* adjust kernel credit return thresholds based on new MTUs */
9663 /* all kernel receive contexts have the same hdrqentsize */
9664 for (i = 0; i < ppd->vls_supported; i++) {
9665 sc_set_cr_threshold(dd->vld[i].sc,
9666 sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
9667 dd->rcd[0]->rcvhdrqentsize));
9668 }
9669 sc_set_cr_threshold(dd->vld[15].sc,
9670 sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
9671 dd->rcd[0]->rcvhdrqentsize));
9672
9673 /* Adjust maximum MTU for the port in DC */
9674 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9675 (ilog2(maxvlmtu >> 8) + 1);
9676 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9677 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9678 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9679 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9680 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9681}
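/*
 * Example of the length-check packing above (illustrative only): each
 * VL limit is (MTU + max header bytes) expressed in DWs, packed four
 * VLs per CSR, e.g. with an 8192 byte MTU and 124 header bytes:
 *
 *	(8192 + 124) >> 2 = 2079 DW  ->  LEN_VL<n> field
 *
 * VL15 keeps its own limit in SEND_LEN_CHECK1.
 */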
9682
9683static void set_lidlmc(struct hfi1_pportdata *ppd)
9684{
9685 int i;
9686 u64 sreg = 0;
9687 struct hfi1_devdata *dd = ppd->dd;
9688 u32 mask = ~((1U << ppd->lmc) - 1);
9689 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9690
9691 if (dd->hfi1_snoop.mode_flag)
9692 dd_dev_info(dd, "Set lid/lmc while snooping");
9693
9694 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9695 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9696 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9697 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
9698 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9699 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9700 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9701
9702 /*
9703 * Iterate over all the send contexts and set their SLID check
9704 */
9705 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9706 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9707 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9708 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9709
9710 for (i = 0; i < dd->chip_send_contexts; i++) {
9711 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9712 i, (u32)sreg);
9713 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9714 }
9715
9716 /* Now we have to do the same thing for the sdma engines */
9717 sdma_update_lmc(dd, mask, ppd->lid);
9718}
9719
9720static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9721{
9722 unsigned long timeout;
9723 u32 curr_state;
9724
9725 timeout = jiffies + msecs_to_jiffies(msecs);
9726 while (1) {
9727 curr_state = read_physical_state(dd);
9728 if (curr_state == state)
9729 break;
9730 if (time_after(jiffies, timeout)) {
9731 dd_dev_err(dd,
9732 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9733 state, curr_state);
9734 return -ETIMEDOUT;
9735 }
9736 usleep_range(1950, 2050); /* sleep 2ms-ish */
9737 }
9738
9739 return 0;
9740}
9741
9742/*
9743 * Helper for set_link_state(). Do not call except from that routine.
9744 * Expects ppd->hls_mutex to be held.
9745 *
9746 * @rem_reason value to be sent to the neighbor
9747 *
9748 * LinkDownReasons are only set if the transition succeeds.
9749 */
9750static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9751{
9752 struct hfi1_devdata *dd = ppd->dd;
9753 u32 pstate, previous_state;
9754 u32 last_local_state;
9755 u32 last_remote_state;
9756 int ret;
9757 int do_transition;
9758 int do_wait;
9759
9760 previous_state = ppd->host_link_state;
9761 ppd->host_link_state = HLS_GOING_OFFLINE;
9762 pstate = read_physical_state(dd);
9763 if (pstate == PLS_OFFLINE) {
9764 do_transition = 0; /* in right state */
9765 do_wait = 0; /* ...no need to wait */
9766 } else if ((pstate & 0xff) == PLS_OFFLINE) {
9767 do_transition = 0; /* in an offline transient state */
9768 do_wait = 1; /* ...wait for it to settle */
9769 } else {
9770 do_transition = 1; /* need to move to offline */
9771 do_wait = 1; /* ...will need to wait */
9772 }
9773
9774 if (do_transition) {
9775 ret = set_physical_link_state(dd,
9776 PLS_OFFLINE | (rem_reason << 8));
9777
9778 if (ret != HCMD_SUCCESS) {
9779 dd_dev_err(dd,
9780 "Failed to transition to Offline link state, return %d\n",
9781 ret);
9782 return -EINVAL;
9783 }
9784		if (ppd->offline_disabled_reason ==
9785			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9786			ppd->offline_disabled_reason =
9787				HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9788	}
9789
9790 if (do_wait) {
9791 /* it can take a while for the link to go down */
9792		ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9793		if (ret < 0)
9794 return ret;
9795 }
9796
9797 /* make sure the logical state is also down */
9798 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9799
9800 /*
9801 * Now in charge of LCB - must be after the physical state is
9802 * offline.quiet and before host_link_state is changed.
9803 */
9804 set_host_lcb_access(dd);
9805 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9806 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9807
9808	if (ppd->port_type == PORT_TYPE_QSFP &&
9809 ppd->qsfp_info.limiting_active &&
9810 qsfp_mod_present(ppd)) {
9811 set_qsfp_tx(ppd, 0);
9812 }
9813
9814	/*
9815 * The LNI has a mandatory wait time after the physical state
9816 * moves to Offline.Quiet. The wait time may be different
9817 * depending on how the link went down. The 8051 firmware
9818 * will observe the needed wait time and only move to ready
9819 * when that is completed. The largest of the quiet timeouts
9820	 * is 6s, so wait that long and then at least 0.5s more for
9821	 * other transitions, and another 0.5s for a buffer.
9822	 */
9823	ret = wait_fm_ready(dd, 7000);
9824	if (ret) {
9825 dd_dev_err(dd,
9826 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9827 /* state is really offline, so make it so */
9828 ppd->host_link_state = HLS_DN_OFFLINE;
9829 return ret;
9830 }
9831
9832 /*
9833 * The state is now offline and the 8051 is ready to accept host
9834 * requests.
9835 * - change our state
9836 * - notify others if we were previously in a linkup state
9837 */
9838 ppd->host_link_state = HLS_DN_OFFLINE;
9839 if (previous_state & HLS_UP) {
9840 /* went down while link was up */
9841 handle_linkup_change(dd, 0);
9842 } else if (previous_state
9843 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9844 /* went down while attempting link up */
9845 /* byte 1 of last_*_state is the failure reason */
9846 read_last_local_state(dd, &last_local_state);
9847 read_last_remote_state(dd, &last_remote_state);
9848 dd_dev_err(dd,
9849 "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9850 last_local_state, last_remote_state);
9851 }
9852
9853 /* the active link width (downgrade) is 0 on link down */
9854 ppd->link_width_active = 0;
9855 ppd->link_width_downgrade_tx_active = 0;
9856 ppd->link_width_downgrade_rx_active = 0;
9857 ppd->current_egress_rate = 0;
9858 return 0;
9859}
9860
9861/* return the link state name */
9862static const char *link_state_name(u32 state)
9863{
9864 const char *name;
9865 int n = ilog2(state);
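	/*
	 * HLS_* states are single-bit values (1 << __HLS_*_BP), so ilog2()
	 * yields the __HLS_*_BP index used by names[] below.
	 */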
9866 static const char * const names[] = {
9867 [__HLS_UP_INIT_BP] = "INIT",
9868 [__HLS_UP_ARMED_BP] = "ARMED",
9869 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
9870 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
9871 [__HLS_DN_POLL_BP] = "POLL",
9872 [__HLS_DN_DISABLE_BP] = "DISABLE",
9873 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
9874 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
9875 [__HLS_GOING_UP_BP] = "GOING_UP",
9876 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9877 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9878 };
9879
9880 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9881 return name ? name : "unknown";
9882}
9883
9884/* return the link state reason name */
9885static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9886{
9887 if (state == HLS_UP_INIT) {
9888 switch (ppd->linkinit_reason) {
9889 case OPA_LINKINIT_REASON_LINKUP:
9890 return "(LINKUP)";
9891 case OPA_LINKINIT_REASON_FLAPPING:
9892 return "(FLAPPING)";
9893 case OPA_LINKINIT_OUTSIDE_POLICY:
9894 return "(OUTSIDE_POLICY)";
9895 case OPA_LINKINIT_QUARANTINED:
9896 return "(QUARANTINED)";
9897 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9898 return "(INSUFIC_CAPABILITY)";
9899 default:
9900 break;
9901 }
9902 }
9903 return "";
9904}
9905
9906/*
9907 * driver_physical_state - convert the driver's notion of a port's
9908 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9909 * Return -1 (converted to a u32) to indicate error.
9910 */
9911u32 driver_physical_state(struct hfi1_pportdata *ppd)
9912{
9913 switch (ppd->host_link_state) {
9914 case HLS_UP_INIT:
9915 case HLS_UP_ARMED:
9916 case HLS_UP_ACTIVE:
9917 return IB_PORTPHYSSTATE_LINKUP;
9918 case HLS_DN_POLL:
9919 return IB_PORTPHYSSTATE_POLLING;
9920 case HLS_DN_DISABLE:
9921 return IB_PORTPHYSSTATE_DISABLED;
9922 case HLS_DN_OFFLINE:
9923 return OPA_PORTPHYSSTATE_OFFLINE;
9924 case HLS_VERIFY_CAP:
9925 return IB_PORTPHYSSTATE_POLLING;
9926 case HLS_GOING_UP:
9927 return IB_PORTPHYSSTATE_POLLING;
9928 case HLS_GOING_OFFLINE:
9929 return OPA_PORTPHYSSTATE_OFFLINE;
9930 case HLS_LINK_COOLDOWN:
9931 return OPA_PORTPHYSSTATE_OFFLINE;
9932 case HLS_DN_DOWNDEF:
9933 default:
9934 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9935 ppd->host_link_state);
9936 return -1;
9937 }
9938}
9939
9940/*
9941 * driver_logical_state - convert the driver's notion of a port's
9942 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9943 * (converted to a u32) to indicate error.
9944 */
9945u32 driver_logical_state(struct hfi1_pportdata *ppd)
9946{
9947 if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9948 return IB_PORT_DOWN;
9949
9950 switch (ppd->host_link_state & HLS_UP) {
9951 case HLS_UP_INIT:
9952 return IB_PORT_INIT;
9953 case HLS_UP_ARMED:
9954 return IB_PORT_ARMED;
9955 case HLS_UP_ACTIVE:
9956 return IB_PORT_ACTIVE;
9957 default:
9958 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9959 ppd->host_link_state);
9960 return -1;
9961 }
9962}
9963
9964void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
9965 u8 neigh_reason, u8 rem_reason)
9966{
9967 if (ppd->local_link_down_reason.latest == 0 &&
9968 ppd->neigh_link_down_reason.latest == 0) {
9969 ppd->local_link_down_reason.latest = lcl_reason;
9970 ppd->neigh_link_down_reason.latest = neigh_reason;
9971 ppd->remote_link_down_reason = rem_reason;
9972 }
9973}
9974
9975/*
9976 * Change the physical and/or logical link state.
9977 *
9978 * Do not call this routine while inside an interrupt. It contains
9979 * calls to routines that can take multiple seconds to finish.
9980 *
9981 * Returns 0 on success, -errno on failure.
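 *
 * The checks below encode the normal bring-up order:
 * HLS_DN_POLL -> HLS_VERIFY_CAP -> HLS_GOING_UP -> HLS_UP_INIT ->
 * HLS_UP_ARMED -> HLS_UP_ACTIVE.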
9982 */
9983int set_link_state(struct hfi1_pportdata *ppd, u32 state)
9984{
9985 struct hfi1_devdata *dd = ppd->dd;
9986 struct ib_event event = {.device = NULL};
9987 int ret1, ret = 0;
9988 int was_up, is_down;
9989 int orig_new_state, poll_bounce;
9990
9991 mutex_lock(&ppd->hls_lock);
9992
9993 orig_new_state = state;
9994 if (state == HLS_DN_DOWNDEF)
9995 state = dd->link_default;
9996
9997 /* interpret poll -> poll as a link bounce */
9998 poll_bounce = ppd->host_link_state == HLS_DN_POLL
9999 && state == HLS_DN_POLL;
10000
10001 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10002 link_state_name(ppd->host_link_state),
10003 link_state_name(orig_new_state),
10004 poll_bounce ? "(bounce) " : "",
10005 link_state_reason_name(ppd, state));
10006
10007 was_up = !!(ppd->host_link_state & HLS_UP);
10008
10009 /*
10010 * If we're going to a (HLS_*) link state that implies the logical
10011 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10012 * reset is_sm_config_started to 0.
10013 */
10014 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10015 ppd->is_sm_config_started = 0;
10016
10017 /*
10018 * Do nothing if the states match. Let a poll to poll link bounce
10019 * go through.
10020 */
10021 if (ppd->host_link_state == state && !poll_bounce)
10022 goto done;
10023
10024 switch (state) {
10025 case HLS_UP_INIT:
10026 if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
10027 || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10028 /*
10029 * Quick link up jumps from polling to here.
10030 *
10031 * Whether in normal or loopback mode, the
10032 * simulator jumps from polling to link up.
10033 * Accept that here.
10034 */
10035 /* OK */;
10036 } else if (ppd->host_link_state != HLS_GOING_UP) {
10037 goto unexpected;
10038 }
10039
10040 ppd->host_link_state = HLS_UP_INIT;
10041 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10042 if (ret) {
10043 /* logical state didn't change, stay at going_up */
10044 ppd->host_link_state = HLS_GOING_UP;
10045 dd_dev_err(dd,
10046 "%s: logical state did not change to INIT\n",
10047 __func__);
10048 } else {
10049 /* clear old transient LINKINIT_REASON code */
10050 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10051 ppd->linkinit_reason =
10052 OPA_LINKINIT_REASON_LINKUP;
10053
10054 /* enable the port */
10055 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10056
10057 handle_linkup_change(dd, 1);
10058 }
10059 break;
10060 case HLS_UP_ARMED:
10061 if (ppd->host_link_state != HLS_UP_INIT)
10062 goto unexpected;
10063
10064 ppd->host_link_state = HLS_UP_ARMED;
10065 set_logical_state(dd, LSTATE_ARMED);
10066 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10067 if (ret) {
10068 /* logical state didn't change, stay at init */
10069 ppd->host_link_state = HLS_UP_INIT;
10070 dd_dev_err(dd,
10071 "%s: logical state did not change to ARMED\n",
10072 __func__);
10073 }
10074 /*
10075 * The simulator does not currently implement SMA messages,
10076 * so neighbor_normal is not set. Set it here when we first
10077 * move to Armed.
10078 */
10079 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10080 ppd->neighbor_normal = 1;
10081 break;
10082 case HLS_UP_ACTIVE:
10083 if (ppd->host_link_state != HLS_UP_ARMED)
10084 goto unexpected;
10085
10086 ppd->host_link_state = HLS_UP_ACTIVE;
10087 set_logical_state(dd, LSTATE_ACTIVE);
10088 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10089 if (ret) {
10090 /* logical state didn't change, stay at armed */
10091 ppd->host_link_state = HLS_UP_ARMED;
10092 dd_dev_err(dd,
10093 "%s: logical state did not change to ACTIVE\n",
10094 __func__);
10095 } else {
10096
10097 /* tell all engines to go running */
10098 sdma_all_running(dd);
10099
10100			/* Signal the IB layer that the port has gone active */
10101			event.device = &dd->verbs_dev.rdi.ibdev;
10102			event.element.port_num = ppd->port;
10103 event.event = IB_EVENT_PORT_ACTIVE;
10104 }
10105 break;
10106 case HLS_DN_POLL:
10107 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10108 ppd->host_link_state == HLS_DN_OFFLINE) &&
10109 dd->dc_shutdown)
10110 dc_start(dd);
10111 /* Hand LED control to the DC */
10112 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10113
10114 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10115 u8 tmp = ppd->link_enabled;
10116
10117 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10118 if (ret) {
10119 ppd->link_enabled = tmp;
10120 break;
10121 }
10122 ppd->remote_link_down_reason = 0;
10123
10124 if (ppd->driver_link_ready)
10125 ppd->link_enabled = 1;
10126 }
10127
10128		set_all_slowpath(ppd->dd);
10129		ret = set_local_link_attributes(ppd);
10130 if (ret)
10131 break;
10132
10133 ppd->port_error_action = 0;
10134 ppd->host_link_state = HLS_DN_POLL;
10135
10136 if (quick_linkup) {
10137 /* quick linkup does not go into polling */
10138 ret = do_quick_linkup(dd);
10139 } else {
10140 ret1 = set_physical_link_state(dd, PLS_POLLING);
10141 if (ret1 != HCMD_SUCCESS) {
10142 dd_dev_err(dd,
10143 "Failed to transition to Polling link state, return 0x%x\n",
10144 ret1);
10145 ret = -EINVAL;
10146 }
10147 }
10148		ppd->offline_disabled_reason =
10149			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10150		/*
10151 * If an error occurred above, go back to offline. The
10152 * caller may reschedule another attempt.
10153 */
10154 if (ret)
10155 goto_offline(ppd, 0);
10156 break;
10157 case HLS_DN_DISABLE:
10158 /* link is disabled */
10159 ppd->link_enabled = 0;
10160
10161 /* allow any state to transition to disabled */
10162
10163 /* must transition to offline first */
10164 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10165 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10166 if (ret)
10167 break;
10168 ppd->remote_link_down_reason = 0;
10169 }
10170
10171 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10172 if (ret1 != HCMD_SUCCESS) {
10173 dd_dev_err(dd,
10174 "Failed to transition to Disabled link state, return 0x%x\n",
10175 ret1);
10176 ret = -EINVAL;
10177 break;
10178 }
10179 ppd->host_link_state = HLS_DN_DISABLE;
10180 dc_shutdown(dd);
10181 break;
10182 case HLS_DN_OFFLINE:
10183 if (ppd->host_link_state == HLS_DN_DISABLE)
10184 dc_start(dd);
10185
10186 /* allow any state to transition to offline */
10187 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10188 if (!ret)
10189 ppd->remote_link_down_reason = 0;
10190 break;
10191 case HLS_VERIFY_CAP:
10192 if (ppd->host_link_state != HLS_DN_POLL)
10193 goto unexpected;
10194 ppd->host_link_state = HLS_VERIFY_CAP;
10195 break;
10196 case HLS_GOING_UP:
10197 if (ppd->host_link_state != HLS_VERIFY_CAP)
10198 goto unexpected;
10199
10200 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10201 if (ret1 != HCMD_SUCCESS) {
10202 dd_dev_err(dd,
10203 "Failed to transition to link up state, return 0x%x\n",
10204 ret1);
10205 ret = -EINVAL;
10206 break;
10207 }
10208 ppd->host_link_state = HLS_GOING_UP;
10209 break;
10210
10211 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10212 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10213 default:
10214 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10215 __func__, state);
10216 ret = -EINVAL;
10217 break;
10218 }
10219
10220 is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10221 HLS_DN_DISABLE | HLS_DN_OFFLINE));
10222
10223 if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10224 ppd->neigh_link_down_reason.sma == 0) {
10225 ppd->local_link_down_reason.sma =
10226 ppd->local_link_down_reason.latest;
10227 ppd->neigh_link_down_reason.sma =
10228 ppd->neigh_link_down_reason.latest;
10229 }
10230
10231 goto done;
10232
10233unexpected:
10234 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10235 __func__, link_state_name(ppd->host_link_state),
10236 link_state_name(state));
10237 ret = -EINVAL;
10238
10239done:
10240 mutex_unlock(&ppd->hls_lock);
10241
10242 if (event.device)
10243 ib_dispatch_event(&event);
10244
10245 return ret;
10246}
10247
10248int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10249{
10250 u64 reg;
10251 int ret = 0;
10252
10253 switch (which) {
10254 case HFI1_IB_CFG_LIDLMC:
10255 set_lidlmc(ppd);
10256 break;
10257 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10258 /*
10259 * The VL Arbitrator high limit is sent in units of 4k
10260 * bytes, while HFI stores it in units of 64 bytes.
10261 */
10262 val *= 4096/64;
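		/* e.g. an FM value of 1 (4 KB) is stored as 64 (64-byte units) */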
10263 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10264 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10265 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10266 break;
10267 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10268 /* HFI only supports POLL as the default link down state */
10269 if (val != HLS_DN_POLL)
10270 ret = -EINVAL;
10271 break;
10272 case HFI1_IB_CFG_OP_VLS:
10273 if (ppd->vls_operational != val) {
10274 ppd->vls_operational = val;
10275 if (!ppd->port)
10276 ret = -EINVAL;
10277 else
10278 ret = sdma_map_init(
10279 ppd->dd,
10280 ppd->port - 1,
10281 val,
10282 NULL);
10283 }
10284 break;
10285 /*
10286 * For link width, link width downgrade, and speed enable, always AND
10287 * the setting with what is actually supported. This has two benefits.
10288 * First, enabled can't have unsupported values, no matter what the
10289 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10290 * "fill in with your supported value" have all the bits in the
10291 * field set, so simply ANDing with supported has the desired result.
10292 */
10293 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10294 ppd->link_width_enabled = val & ppd->link_width_supported;
10295 break;
10296 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10297 ppd->link_width_downgrade_enabled =
10298 val & ppd->link_width_downgrade_supported;
10299 break;
10300 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10301 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10302 break;
10303 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10304 /*
10305 * HFI does not follow IB specs, save this value
10306 * so we can report it, if asked.
10307 */
10308 ppd->overrun_threshold = val;
10309 break;
10310 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10311 /*
10312 * HFI does not follow IB specs, save this value
10313 * so we can report it, if asked.
10314 */
10315 ppd->phy_error_threshold = val;
10316 break;
10317
10318 case HFI1_IB_CFG_MTU:
10319 set_send_length(ppd);
10320 break;
10321
10322 case HFI1_IB_CFG_PKEYS:
10323 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10324 set_partition_keys(ppd);
10325 break;
10326
10327 default:
10328 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10329 dd_dev_info(ppd->dd,
10330 "%s: which %s, val 0x%x: not implemented\n",
10331 __func__, ib_cfg_name(which), val);
10332 break;
10333 }
10334 return ret;
10335}
10336
10337/* begin functions related to vl arbitration table caching */
10338static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10339{
10340 int i;
10341
10342 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10343 VL_ARB_LOW_PRIO_TABLE_SIZE);
10344 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10345 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10346
10347 /*
10348 * Note that we always return values directly from the
10349 * 'vl_arb_cache' (and do no CSR reads) in response to a
10350 * 'Get(VLArbTable)'. This is obviously correct after a
10351 * 'Set(VLArbTable)', since the cache will then be up to
10352 * date. But it's also correct prior to any 'Set(VLArbTable)'
10353 * since then both the cache, and the relevant h/w registers
10354 * will be zeroed.
10355 */
10356
10357 for (i = 0; i < MAX_PRIO_TABLE; i++)
10358 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10359}
10360
10361/*
10362 * vl_arb_lock_cache
10363 *
10364 * All other vl_arb_* functions should be called only after locking
10365 * the cache.
10366 */
10367static inline struct vl_arb_cache *
10368vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10369{
10370 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10371 return NULL;
10372 spin_lock(&ppd->vl_arb_cache[idx].lock);
10373 return &ppd->vl_arb_cache[idx];
10374}
10375
10376static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10377{
10378 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10379}
10380
10381static void vl_arb_get_cache(struct vl_arb_cache *cache,
10382 struct ib_vl_weight_elem *vl)
10383{
10384 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10385}
10386
10387static void vl_arb_set_cache(struct vl_arb_cache *cache,
10388 struct ib_vl_weight_elem *vl)
10389{
10390 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10391}
10392
10393static int vl_arb_match_cache(struct vl_arb_cache *cache,
10394 struct ib_vl_weight_elem *vl)
10395{
10396 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10397}
10398/* end functions related to vl arbitration table caching */
10399
10400static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10401 u32 size, struct ib_vl_weight_elem *vl)
10402{
10403 struct hfi1_devdata *dd = ppd->dd;
10404 u64 reg;
10405 unsigned int i, is_up = 0;
10406 int drain, ret = 0;
10407
10408 mutex_lock(&ppd->hls_lock);
10409
10410 if (ppd->host_link_state & HLS_UP)
10411 is_up = 1;
10412
10413 drain = !is_ax(dd) && is_up;
10414
10415 if (drain)
10416 /*
10417 * Before adjusting VL arbitration weights, empty per-VL
10418 * FIFOs, otherwise a packet whose VL weight is being
10419 * set to 0 could get stuck in a FIFO with no chance to
10420 * egress.
10421 */
10422 ret = stop_drain_data_vls(dd);
10423
10424 if (ret) {
10425 dd_dev_err(
10426 dd,
10427 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10428 __func__);
10429 goto err;
10430 }
10431
10432 for (i = 0; i < size; i++, vl++) {
10433 /*
10434 * NOTE: The low priority shift and mask are used here, but
10435 * they are the same for both the low and high registers.
10436 */
10437 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10438 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10439 | (((u64)vl->weight
10440 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10441 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10442 write_csr(dd, target + (i * 8), reg);
10443 }
10444 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10445
10446 if (drain)
10447 open_fill_data_vls(dd); /* reopen all VLs */
10448
10449err:
10450 mutex_unlock(&ppd->hls_lock);
10451
10452 return ret;
10453}
10454
10455/*
10456 * Read one credit merge VL register.
10457 */
10458static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10459 struct vl_limit *vll)
10460{
10461 u64 reg = read_csr(dd, csr);
10462
10463 vll->dedicated = cpu_to_be16(
10464 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10465 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10466 vll->shared = cpu_to_be16(
10467 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10468 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10469}
10470
10471/*
10472 * Read the current credit merge limits.
10473 */
10474static int get_buffer_control(struct hfi1_devdata *dd,
10475 struct buffer_control *bc, u16 *overall_limit)
10476{
10477 u64 reg;
10478 int i;
10479
10480 /* not all entries are filled in */
10481 memset(bc, 0, sizeof(*bc));
10482
10483 /* OPA and HFI have a 1-1 mapping */
10484 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10485 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
10486
10487 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10488 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10489
10490 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10491 bc->overall_shared_limit = cpu_to_be16(
10492 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10493 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10494 if (overall_limit)
10495 *overall_limit = (reg
10496 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10497 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10498 return sizeof(struct buffer_control);
10499}
10500
10501static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10502{
10503 u64 reg;
10504 int i;
10505
10506 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10507 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10508 for (i = 0; i < sizeof(u64); i++) {
10509 u8 byte = *(((u8 *)&reg) + i);
10510
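		/* low nibble is SC 2*i, high nibble is SC (2*i + 1) */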
10511 dp->vlnt[2 * i] = byte & 0xf;
10512 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10513 }
10514
10515 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10516 for (i = 0; i < sizeof(u64); i++) {
10517 u8 byte = *(((u8 *)&reg) + i);
10518
10519 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10520 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10521 }
10522 return sizeof(struct sc2vlnt);
10523}
10524
10525static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10526 struct ib_vl_weight_elem *vl)
10527{
10528 unsigned int i;
10529
10530 for (i = 0; i < nelems; i++, vl++) {
10531 vl->vl = 0xf;
10532 vl->weight = 0;
10533 }
10534}
10535
10536static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10537{
10538 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10539 DC_SC_VL_VAL(15_0,
10540 0, dp->vlnt[0] & 0xf,
10541 1, dp->vlnt[1] & 0xf,
10542 2, dp->vlnt[2] & 0xf,
10543 3, dp->vlnt[3] & 0xf,
10544 4, dp->vlnt[4] & 0xf,
10545 5, dp->vlnt[5] & 0xf,
10546 6, dp->vlnt[6] & 0xf,
10547 7, dp->vlnt[7] & 0xf,
10548 8, dp->vlnt[8] & 0xf,
10549 9, dp->vlnt[9] & 0xf,
10550 10, dp->vlnt[10] & 0xf,
10551 11, dp->vlnt[11] & 0xf,
10552 12, dp->vlnt[12] & 0xf,
10553 13, dp->vlnt[13] & 0xf,
10554 14, dp->vlnt[14] & 0xf,
10555 15, dp->vlnt[15] & 0xf));
10556 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10557 DC_SC_VL_VAL(31_16,
10558 16, dp->vlnt[16] & 0xf,
10559 17, dp->vlnt[17] & 0xf,
10560 18, dp->vlnt[18] & 0xf,
10561 19, dp->vlnt[19] & 0xf,
10562 20, dp->vlnt[20] & 0xf,
10563 21, dp->vlnt[21] & 0xf,
10564 22, dp->vlnt[22] & 0xf,
10565 23, dp->vlnt[23] & 0xf,
10566 24, dp->vlnt[24] & 0xf,
10567 25, dp->vlnt[25] & 0xf,
10568 26, dp->vlnt[26] & 0xf,
10569 27, dp->vlnt[27] & 0xf,
10570 28, dp->vlnt[28] & 0xf,
10571 29, dp->vlnt[29] & 0xf,
10572 30, dp->vlnt[30] & 0xf,
10573 31, dp->vlnt[31] & 0xf));
10574}
10575
10576static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10577 u16 limit)
10578{
10579 if (limit != 0)
10580 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10581 what, (int)limit, idx);
10582}
10583
10584/* change only the shared limit portion of SendCmGLobalCredit */
10585static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10586{
10587 u64 reg;
10588
10589 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10590 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10591 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10592 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10593}
10594
10595/* change only the total credit limit portion of SendCmGLobalCredit */
10596static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10597{
10598 u64 reg;
10599
10600 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10601 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10602 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10603 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10604}
10605
10606/* set the given per-VL shared limit */
10607static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10608{
10609 u64 reg;
10610 u32 addr;
10611
10612 if (vl < TXE_NUM_DATA_VL)
10613 addr = SEND_CM_CREDIT_VL + (8 * vl);
10614 else
10615 addr = SEND_CM_CREDIT_VL15;
10616
10617 reg = read_csr(dd, addr);
10618 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10619 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10620 write_csr(dd, addr, reg);
10621}
10622
10623/* set the given per-VL dedicated limit */
10624static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10625{
10626 u64 reg;
10627 u32 addr;
10628
10629 if (vl < TXE_NUM_DATA_VL)
10630 addr = SEND_CM_CREDIT_VL + (8 * vl);
10631 else
10632 addr = SEND_CM_CREDIT_VL15;
10633
10634 reg = read_csr(dd, addr);
10635 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10636 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10637 write_csr(dd, addr, reg);
10638}
10639
10640/* spin until the given per-VL status mask bits clear */
10641static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10642 const char *which)
10643{
10644 unsigned long timeout;
10645 u64 reg;
10646
10647 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10648 while (1) {
10649 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10650
10651 if (reg == 0)
10652 return; /* success */
10653 if (time_after(jiffies, timeout))
10654 break; /* timed out */
10655 udelay(1);
10656 }
10657
10658 dd_dev_err(dd,
10659 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10660 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10661 /*
10662 * If this occurs, it is likely there was a credit loss on the link.
10663 * The only recovery from that is a link bounce.
10664 */
10665 dd_dev_err(dd,
10666 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
10667}
10668
10669/*
10670 * The number of credits on the VLs may be changed while everything
10671 * is "live", but the following algorithm must be followed due to
10672 * how the hardware is actually implemented. In particular,
10673 * Return_Credit_Status[] is the only correct status check.
10674 *
10675 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10676 * set Global_Shared_Credit_Limit = 0
10677 * use_all_vl = 1
10678 * mask0 = all VLs that are changing either dedicated or shared limits
10679 * set Shared_Limit[mask0] = 0
10680 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10681 * if (changing any dedicated limit)
10682 * mask1 = all VLs that are lowering dedicated limits
10683 * lower Dedicated_Limit[mask1]
10684 * spin until Return_Credit_Status[mask1] == 0
10685 * raise Dedicated_Limits
10686 * raise Shared_Limits
10687 * raise Global_Shared_Credit_Limit
10688 *
10689 * lower = if the new limit is lower, set the limit to the new value
10690 * raise = if the new limit is higher than the current value (may be changed
10691 * earlier in the algorithm), set the new limit to the new value
10692 */
10693static int set_buffer_control(struct hfi1_devdata *dd,
10694 struct buffer_control *new_bc)
10695{
10696 u64 changing_mask, ld_mask, stat_mask;
10697 int change_count;
10698 int i, use_all_mask;
10699 int this_shared_changing;
10700 /*
10701 * A0: add the variable any_shared_limit_changing below and in the
10702 * algorithm above. If removing A0 support, it can be removed.
10703 */
10704 int any_shared_limit_changing;
10705 struct buffer_control cur_bc;
10706 u8 changing[OPA_MAX_VLS];
10707 u8 lowering_dedicated[OPA_MAX_VLS];
10708 u16 cur_total;
10709 u32 new_total = 0;
10710 const u64 all_mask =
10711 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10712 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10713 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10714 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10715 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10716 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10717 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10718 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10719 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10720
10721#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10722#define NUM_USABLE_VLS 16 /* look at VL15 and less */
10723
10724
10725 /* find the new total credits, do sanity check on unused VLs */
10726 for (i = 0; i < OPA_MAX_VLS; i++) {
10727 if (valid_vl(i)) {
10728 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10729 continue;
10730 }
10731 nonzero_msg(dd, i, "dedicated",
10732 be16_to_cpu(new_bc->vl[i].dedicated));
10733 nonzero_msg(dd, i, "shared",
10734 be16_to_cpu(new_bc->vl[i].shared));
10735 new_bc->vl[i].dedicated = 0;
10736 new_bc->vl[i].shared = 0;
10737 }
10738 new_total += be16_to_cpu(new_bc->overall_shared_limit);
10739
10740	/* fetch the current values */
10741 get_buffer_control(dd, &cur_bc, &cur_total);
10742
10743 /*
10744 * Create the masks we will use.
10745 */
10746 memset(changing, 0, sizeof(changing));
10747 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10748 /* NOTE: Assumes that the individual VL bits are adjacent and in
10749 increasing order */
10750 stat_mask =
10751 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10752 changing_mask = 0;
10753 ld_mask = 0;
10754 change_count = 0;
10755 any_shared_limit_changing = 0;
10756 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10757 if (!valid_vl(i))
10758 continue;
10759 this_shared_changing = new_bc->vl[i].shared
10760 != cur_bc.vl[i].shared;
10761 if (this_shared_changing)
10762 any_shared_limit_changing = 1;
10763 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
10764 || this_shared_changing) {
10765 changing[i] = 1;
10766 changing_mask |= stat_mask;
10767 change_count++;
10768 }
10769 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10770 be16_to_cpu(cur_bc.vl[i].dedicated)) {
10771 lowering_dedicated[i] = 1;
10772 ld_mask |= stat_mask;
10773 }
10774 }
10775
10776 /* bracket the credit change with a total adjustment */
10777 if (new_total > cur_total)
10778 set_global_limit(dd, new_total);
10779
10780 /*
10781 * Start the credit change algorithm.
10782 */
10783 use_all_mask = 0;
10784 if ((be16_to_cpu(new_bc->overall_shared_limit) <
10785	    be16_to_cpu(cur_bc.overall_shared_limit)) ||
10786	    (is_ax(dd) && any_shared_limit_changing)) {
10787		set_global_shared(dd, 0);
10788 cur_bc.overall_shared_limit = 0;
10789 use_all_mask = 1;
10790 }
10791
10792 for (i = 0; i < NUM_USABLE_VLS; i++) {
10793 if (!valid_vl(i))
10794 continue;
10795
10796 if (changing[i]) {
10797 set_vl_shared(dd, i, 0);
10798 cur_bc.vl[i].shared = 0;
10799 }
10800 }
10801
10802 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10803 "shared");
10804
10805 if (change_count > 0) {
10806 for (i = 0; i < NUM_USABLE_VLS; i++) {
10807 if (!valid_vl(i))
10808 continue;
10809
10810 if (lowering_dedicated[i]) {
10811 set_vl_dedicated(dd, i,
10812 be16_to_cpu(new_bc->vl[i].dedicated));
10813 cur_bc.vl[i].dedicated =
10814 new_bc->vl[i].dedicated;
10815 }
10816 }
10817
10818 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10819
10820 /* now raise all dedicated that are going up */
10821 for (i = 0; i < NUM_USABLE_VLS; i++) {
10822 if (!valid_vl(i))
10823 continue;
10824
10825 if (be16_to_cpu(new_bc->vl[i].dedicated) >
10826 be16_to_cpu(cur_bc.vl[i].dedicated))
10827 set_vl_dedicated(dd, i,
10828 be16_to_cpu(new_bc->vl[i].dedicated));
10829 }
10830 }
10831
10832 /* next raise all shared that are going up */
10833 for (i = 0; i < NUM_USABLE_VLS; i++) {
10834 if (!valid_vl(i))
10835 continue;
10836
10837 if (be16_to_cpu(new_bc->vl[i].shared) >
10838 be16_to_cpu(cur_bc.vl[i].shared))
10839 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10840 }
10841
10842 /* finally raise the global shared */
10843 if (be16_to_cpu(new_bc->overall_shared_limit) >
10844 be16_to_cpu(cur_bc.overall_shared_limit))
10845 set_global_shared(dd,
10846 be16_to_cpu(new_bc->overall_shared_limit));
10847
10848 /* bracket the credit change with a total adjustment */
10849 if (new_total < cur_total)
10850 set_global_limit(dd, new_total);
10851 return 0;
10852}
10853
10854/*
10855 * Read the given fabric manager table. Return the size of the
10856 * table (in bytes) on success, and a negative error code on
10857 * failure.
10858 */
10859int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10860
10861{
10862 int size;
10863 struct vl_arb_cache *vlc;
10864
10865 switch (which) {
10866 case FM_TBL_VL_HIGH_ARB:
10867 size = 256;
10868 /*
10869 * OPA specifies 128 elements (of 2 bytes each), though
10870 * HFI supports only 16 elements in h/w.
10871 */
10872 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10873 vl_arb_get_cache(vlc, t);
10874 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10875 break;
10876 case FM_TBL_VL_LOW_ARB:
10877 size = 256;
10878 /*
10879 * OPA specifies 128 elements (of 2 bytes each), though
10880 * HFI supports only 16 elements in h/w.
10881 */
10882 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10883 vl_arb_get_cache(vlc, t);
10884 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10885 break;
10886 case FM_TBL_BUFFER_CONTROL:
10887 size = get_buffer_control(ppd->dd, t, NULL);
10888 break;
10889 case FM_TBL_SC2VLNT:
10890 size = get_sc2vlnt(ppd->dd, t);
10891 break;
10892 case FM_TBL_VL_PREEMPT_ELEMS:
10893 size = 256;
10894 /* OPA specifies 128 elements, of 2 bytes each */
10895 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10896 break;
10897 case FM_TBL_VL_PREEMPT_MATRIX:
10898 size = 256;
10899 /*
10900 * OPA specifies that this is the same size as the VL
10901 * arbitration tables (i.e., 256 bytes).
10902 */
10903 break;
10904 default:
10905 return -EINVAL;
10906 }
10907 return size;
10908}
10909
10910/*
10911 * Write the given fabric manager table.
10912 */
10913int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10914{
10915 int ret = 0;
10916 struct vl_arb_cache *vlc;
10917
10918 switch (which) {
10919 case FM_TBL_VL_HIGH_ARB:
10920 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10921 if (vl_arb_match_cache(vlc, t)) {
10922 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10923 break;
10924 }
10925 vl_arb_set_cache(vlc, t);
10926 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10927 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10928 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10929 break;
10930 case FM_TBL_VL_LOW_ARB:
10931 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10932 if (vl_arb_match_cache(vlc, t)) {
10933 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10934 break;
10935 }
10936 vl_arb_set_cache(vlc, t);
10937 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10938 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10939 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10940 break;
10941 case FM_TBL_BUFFER_CONTROL:
10942 ret = set_buffer_control(ppd->dd, t);
10943 break;
10944 case FM_TBL_SC2VLNT:
10945 set_sc2vlnt(ppd->dd, t);
10946 break;
10947 default:
10948 ret = -EINVAL;
10949 }
10950 return ret;
10951}
10952
10953/*
10954 * Disable all data VLs.
10955 *
10956 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
10957 */
10958static int disable_data_vls(struct hfi1_devdata *dd)
10959{
10960	if (is_ax(dd))
10961		return 1;
10962
10963 pio_send_control(dd, PSC_DATA_VL_DISABLE);
10964
10965 return 0;
10966}
10967
10968/*
10969 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
10970 * Just re-enables all data VLs (the "fill" part happens
10971 * automatically - the name was chosen for symmetry with
10972 * stop_drain_data_vls()).
10973 *
10974 * Return 0 if successful, non-zero if the VLs cannot be enabled.
10975 */
10976int open_fill_data_vls(struct hfi1_devdata *dd)
10977{
10978	if (is_ax(dd))
10979		return 1;
10980
10981 pio_send_control(dd, PSC_DATA_VL_ENABLE);
10982
10983 return 0;
10984}
10985
10986/*
10987 * drain_data_vls() - assumes that disable_data_vls() has been called,
10988 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
10989 * engines to drop to 0.
10990 */
10991static void drain_data_vls(struct hfi1_devdata *dd)
10992{
10993 sc_wait(dd);
10994 sdma_wait(dd);
10995 pause_for_credit_return(dd);
10996}
10997
10998/*
10999 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11000 *
11001 * Use open_fill_data_vls() to resume using data VLs. This pair is
11002 * meant to be used like this:
11003 *
11004 * stop_drain_data_vls(dd);
11005 * // do things with per-VL resources
11006 * open_fill_data_vls(dd);
11007 */
11008int stop_drain_data_vls(struct hfi1_devdata *dd)
11009{
11010 int ret;
11011
11012 ret = disable_data_vls(dd);
11013 if (ret == 0)
11014 drain_data_vls(dd);
11015
11016 return ret;
11017}
11018
11019/*
11020 * Convert a nanosecond time to a cclock count. No matter how slow
11021 * the cclock, a non-zero ns will always have a non-zero result.
11022 */
11023u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11024{
11025 u32 cclocks;
11026
11027 if (dd->icode == ICODE_FPGA_EMULATION)
11028 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11029 else /* simulation pretends to be ASIC */
11030 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11031 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11032 cclocks = 1;
11033 return cclocks;
11034}
11035
11036/*
11037 * Convert a cclock count to nanoseconds. No matter how slow
11038 * the cclock, a non-zero cclock count will always have a non-zero result.
11039 */
11040u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11041{
11042 u32 ns;
11043
11044 if (dd->icode == ICODE_FPGA_EMULATION)
11045 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11046 else /* simulation pretends to be ASIC */
11047 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11048 if (cclocks && !ns)
11049 ns = 1;
11050 return ns;
11051}
11052
11053/*
11054 * Dynamically adjust the receive interrupt timeout for a context based on
11055 * incoming packet rate.
11056 *
11057 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11058 */
11059static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11060{
11061 struct hfi1_devdata *dd = rcd->dd;
11062 u32 timeout = rcd->rcvavail_timeout;
11063
11064 /*
11065 * This algorithm doubles or halves the timeout depending on whether
11066	 * the number of packets received in this interrupt was less than or
11067	 * greater than or equal to the interrupt count.
11068 *
11069 * The calculations below do not allow a steady state to be achieved.
11070	 * Only at the endpoints is it possible to have an unchanging
11071 * timeout.
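	 *
	 * For example, with an interrupt count of 16, receiving fewer than
	 * 16 packets before the timeout halves the timeout, while receiving
	 * 16 or more doubles it (capped at rcv_intr_timeout_csr).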
11072 */
11073 if (npkts < rcv_intr_count) {
11074 /*
11075 * Not enough packets arrived before the timeout, adjust
11076 * timeout downward.
11077 */
11078 if (timeout < 2) /* already at minimum? */
11079 return;
11080 timeout >>= 1;
11081 } else {
11082 /*
11083 * More than enough packets arrived before the timeout, adjust
11084 * timeout upward.
11085 */
11086 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11087 return;
11088 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11089 }
11090
11091 rcd->rcvavail_timeout = timeout;
11092 /* timeout cannot be larger than rcv_intr_timeout_csr which has already
11093 been verified to be in range */
11094 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11095 (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11096}
11097
11098void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11099 u32 intr_adjust, u32 npkts)
11100{
11101 struct hfi1_devdata *dd = rcd->dd;
11102 u64 reg;
11103 u32 ctxt = rcd->ctxt;
11104
11105 /*
11106 * Need to write timeout register before updating RcvHdrHead to ensure
11107 * that a new value is used when the HW decides to restart counting.
11108 */
11109 if (intr_adjust)
11110 adjust_rcv_timeout(rcd, npkts);
11111 if (updegr) {
11112 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11113 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11114 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11115 }
11116 mmiowb();
11117 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11118 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11119 << RCV_HDR_HEAD_HEAD_SHIFT);
11120 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11121 mmiowb();
11122}
11123
11124u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11125{
11126 u32 head, tail;
11127
11128 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11129 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11130
11131 if (rcd->rcvhdrtail_kvaddr)
11132 tail = get_rcvhdrtail(rcd);
11133 else
11134 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11135
11136 return head == tail;
11137}
11138
11139/*
11140 * Context Control and Receive Array encoding for buffer size:
11141 * 0x0 invalid
11142 * 0x1 4 KB
11143 * 0x2 8 KB
11144 * 0x3 16 KB
11145 * 0x4 32 KB
11146 * 0x5 64 KB
11147 * 0x6 128 KB
11148 * 0x7 256 KB
11149 * 0x8 512 KB (Receive Array only)
11150 * 0x9 1 MB (Receive Array only)
11151 * 0xa 2 MB (Receive Array only)
11152 *
11153 * 0xB-0xF - reserved (Receive Array only)
11154 *
11155 *
11156 * This routine assumes that the value has already been sanity checked.
11157 */
11158static u32 encoded_size(u32 size)
11159{
11160 switch (size) {
11161 case 4*1024: return 0x1;
11162 case 8*1024: return 0x2;
11163 case 16*1024: return 0x3;
11164 case 32*1024: return 0x4;
11165 case 64*1024: return 0x5;
11166 case 128*1024: return 0x6;
11167 case 256*1024: return 0x7;
11168 case 512*1024: return 0x8;
11169 case 1*1024*1024: return 0x9;
11170 case 2*1024*1024: return 0xa;
11171 }
11172 return 0x1; /* if invalid, go with the minimum size */
11173}
11174
11175void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11176{
11177 struct hfi1_ctxtdata *rcd;
11178 u64 rcvctrl, reg;
11179 int did_enable = 0;
11180
11181 rcd = dd->rcd[ctxt];
11182 if (!rcd)
11183 return;
11184
11185 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11186
11187 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11188 /* if the context already enabled, don't do the extra steps */
11189	/* if the context is already enabled, don't do the extra steps */
11190 && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11191 /* reset the tail and hdr addresses, and sequence count */
11192 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11193 rcd->rcvhdrq_phys);
11194 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11195 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11196 rcd->rcvhdrqtailaddr_phys);
11197 rcd->seq_cnt = 1;
11198
11199 /* reset the cached receive header queue head value */
11200 rcd->head = 0;
11201
11202 /*
11203 * Zero the receive header queue so we don't get false
11204 * positives when checking the sequence number. The
11205 * sequence numbers could land exactly on the same spot.
11206 * E.g. a rcd restart before the receive header wrapped.
11207	 * E.g. an rcd restart before the receive header queue wrapped.
11208 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11209
11210 /* starting timeout */
11211 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11212
11213 /* enable the context */
11214 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11215
11216 /* clean the egr buffer size first */
11217 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11218 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11219 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11220 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11221
11222 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11223 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11224 did_enable = 1;
11225
11226 /* zero RcvEgrIndexHead */
11227 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11228
11229 /* set eager count and base index */
11230 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11231 & RCV_EGR_CTRL_EGR_CNT_MASK)
11232 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11233 (((rcd->eager_base >> RCV_SHIFT)
11234 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11235 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11236 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11237
11238 /*
11239 * Set TID (expected) count and base index.
11240 * rcd->expected_count is set to individual RcvArray entries,
11241 * not pairs, and the CSR takes a pair-count in groups of
11242 * four, so divide by 8.
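		 * For example, an expected_count of 2048 entries is
		 * programmed as 2048 / 8 = 256.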
11243 */
11244 reg = (((rcd->expected_count >> RCV_SHIFT)
11245 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11246 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11247 (((rcd->expected_base >> RCV_SHIFT)
11248 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11249 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11250 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11251		if (ctxt == HFI1_CTRL_CTXT)
11252			write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11253	}
11254 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11255 write_csr(dd, RCV_VL15, 0);
11256		/*
11257 * When receive context is being disabled turn on tail
11258 * update with a dummy tail address and then disable
11259 * receive context.
11260 */
11261 if (dd->rcvhdrtail_dummy_physaddr) {
11262 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11263 dd->rcvhdrtail_dummy_physaddr);
11264			/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11265			rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11266 }
11267
11268		rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11269 }
11270 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11271 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11272 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11273 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11274 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11275 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11276	if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11277 /* See comment on RcvCtxtCtrl.TailUpd above */
11278 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11279 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11280 }
11281	if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11282 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11283 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11284 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11285 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11286 /* In one-packet-per-eager mode, the size comes from
11287 the RcvArray entry. */
11288 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11289 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11290 }
11291 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11292 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11293 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11294 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11295 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11296 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11297 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11298 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11299 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11300 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11301 rcd->rcvctrl = rcvctrl;
11302 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11303 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11304
11305 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11306 if (did_enable
11307 && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11308 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11309 if (reg != 0) {
11310 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11311 ctxt, reg);
11312 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11313 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11314 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11315 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11316 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11317 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11318 ctxt, reg, reg == 0 ? "not" : "still");
11319 }
11320 }
11321
11322 if (did_enable) {
11323 /*
11324 * The interrupt timeout and count must be set after
11325 * the context is enabled to take effect.
11326 */
11327 /* set interrupt timeout */
11328 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11329 (u64)rcd->rcvavail_timeout <<
11330 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11331
11332 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11333 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11334 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11335 }
11336
11337 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11338 /*
11339 * If the context has been disabled and the Tail Update has
11340		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11341		 * address so it doesn't contain an address that is invalid.
11342		 */
11343		write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11344			dd->rcvhdrtail_dummy_physaddr);
11345}
11346
11347u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
11348 u64 **cntrp)
11349{
11350 int ret;
11351 u64 val = 0;
11352
11353 if (namep) {
11354 ret = dd->cntrnameslen;
11355 if (pos != 0) {
11356 dd_dev_err(dd, "read_cntrs does not support indexing");
11357 return 0;
11358 }
11359 *namep = dd->cntrnames;
11360 } else {
11361 const struct cntr_entry *entry;
11362 int i, j;
11363
11364 ret = (dd->ndevcntrs) * sizeof(u64);
11365 if (pos != 0) {
11366 dd_dev_err(dd, "read_cntrs does not support indexing");
11367 return 0;
11368 }
11369
11370 /* Get the start of the block of counters */
11371 *cntrp = dd->cntrs;
11372
11373 /*
11374 * Now go and fill in each counter in the block.
11375 */
11376 for (i = 0; i < DEV_CNTR_LAST; i++) {
11377 entry = &dev_cntrs[i];
11378 hfi1_cdbg(CNTR, "reading %s", entry->name);
11379 if (entry->flags & CNTR_DISABLED) {
11380 /* Nothing */
11381 hfi1_cdbg(CNTR, "\tDisabled\n");
11382 } else {
11383 if (entry->flags & CNTR_VL) {
11384 hfi1_cdbg(CNTR, "\tPer VL\n");
11385 for (j = 0; j < C_VL_COUNT; j++) {
11386 val = entry->rw_cntr(entry,
11387 dd, j,
11388 CNTR_MODE_R,
11389 0);
11390 hfi1_cdbg(
11391 CNTR,
11392 "\t\tRead 0x%llx for %d\n",
11393 val, j);
11394 dd->cntrs[entry->offset + j] =
11395 val;
11396 }
11397				} else if (entry->flags & CNTR_SDMA) {
11398 hfi1_cdbg(CNTR,
11399 "\t Per SDMA Engine\n");
11400 for (j = 0; j < dd->chip_sdma_engines;
11401 j++) {
11402 val =
11403 entry->rw_cntr(entry, dd, j,
11404 CNTR_MODE_R, 0);
11405 hfi1_cdbg(CNTR,
11406 "\t\tRead 0x%llx for %d\n",
11407 val, j);
11408 dd->cntrs[entry->offset + j] =
11409 val;
11410 }
11411				} else {
11412 val = entry->rw_cntr(entry, dd,
11413 CNTR_INVALID_VL,
11414 CNTR_MODE_R, 0);
11415 dd->cntrs[entry->offset] = val;
11416 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11417 }
11418 }
11419 }
11420 }
11421 return ret;
11422}
11423
11424/*
11425 * Used by sysfs to create files for hfi stats to read
11426 */
11427u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
11428 char **namep, u64 **cntrp)
11429{
11430 int ret;
11431 u64 val = 0;
11432
11433 if (namep) {
11434 ret = dd->portcntrnameslen;
11435 if (pos != 0) {
11436 dd_dev_err(dd, "index not supported");
11437 return 0;
11438 }
11439 *namep = dd->portcntrnames;
11440 } else {
11441 const struct cntr_entry *entry;
11442 struct hfi1_pportdata *ppd;
11443 int i, j;
11444
11445 ret = (dd->nportcntrs) * sizeof(u64);
11446 if (pos != 0) {
11447 dd_dev_err(dd, "indexing not supported");
11448 return 0;
11449 }
11450 ppd = (struct hfi1_pportdata *)(dd + 1 + port);
11451 *cntrp = ppd->cntrs;
11452
11453 for (i = 0; i < PORT_CNTR_LAST; i++) {
11454 entry = &port_cntrs[i];
11455 hfi1_cdbg(CNTR, "reading %s", entry->name);
11456 if (entry->flags & CNTR_DISABLED) {
11457 /* Nothing */
11458 hfi1_cdbg(CNTR, "\tDisabled\n");
11459 continue;
11460 }
11461
11462 if (entry->flags & CNTR_VL) {
11463 hfi1_cdbg(CNTR, "\tPer VL");
11464 for (j = 0; j < C_VL_COUNT; j++) {
11465 val = entry->rw_cntr(entry, ppd, j,
11466 CNTR_MODE_R,
11467 0);
11468 hfi1_cdbg(
11469 CNTR,
11470 "\t\tRead 0x%llx for %d",
11471 val, j);
11472 ppd->cntrs[entry->offset + j] = val;
11473 }
11474 } else {
11475 val = entry->rw_cntr(entry, ppd,
11476 CNTR_INVALID_VL,
11477 CNTR_MODE_R,
11478 0);
11479 ppd->cntrs[entry->offset] = val;
11480 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11481 }
11482 }
11483 }
11484 return ret;
11485}
11486
11487static void free_cntrs(struct hfi1_devdata *dd)
11488{
11489 struct hfi1_pportdata *ppd;
11490 int i;
11491
11492 if (dd->synth_stats_timer.data)
11493 del_timer_sync(&dd->synth_stats_timer);
11494 dd->synth_stats_timer.data = 0;
11495 ppd = (struct hfi1_pportdata *)(dd + 1);
11496 for (i = 0; i < dd->num_pports; i++, ppd++) {
11497 kfree(ppd->cntrs);
11498 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011499 free_percpu(ppd->ibport_data.rvp.rc_acks);
11500 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11501 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011502 ppd->cntrs = NULL;
11503 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011504 ppd->ibport_data.rvp.rc_acks = NULL;
11505 ppd->ibport_data.rvp.rc_qacks = NULL;
11506 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011507 }
11508 kfree(dd->portcntrnames);
11509 dd->portcntrnames = NULL;
11510 kfree(dd->cntrs);
11511 dd->cntrs = NULL;
11512 kfree(dd->scntrs);
11513 dd->scntrs = NULL;
11514 kfree(dd->cntrnames);
11515 dd->cntrnames = NULL;
11516}
11517
11518#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11519#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11520
11521static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11522 u64 *psval, void *context, int vl)
11523{
11524 u64 val;
11525 u64 sval = *psval;
11526
11527 if (entry->flags & CNTR_DISABLED) {
11528 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11529 return 0;
11530 }
11531
11532 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11533
11534 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11535
11536 /* If it's a synthetic counter, there is more work we need to do */
11537 if (entry->flags & CNTR_SYNTH) {
11538 if (sval == CNTR_MAX) {
11539 /* No need to read an already-saturated counter */
11540 return CNTR_MAX;
11541 }
11542
11543 if (entry->flags & CNTR_32BIT) {
11544 /* 32bit counters can wrap multiple times */
11545 u64 upper = sval >> 32;
11546 u64 lower = (sval << 32) >> 32;
11547
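/*
 * Worked example: if the saved value is 0x1fffffff0 (upper = 1 prior
 * wrap, lower = 0xfffffff0) and the hardware now reads back 0x10, the
 * counter wrapped again: upper becomes 2 and the extended value
 * written back is 0x200000010.
 */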
11548 if (lower > val) { /* hw wrapped */
11549 if (upper == CNTR_32BIT_MAX)
11550 val = CNTR_MAX;
11551 else
11552 upper++;
11553 }
11554
11555 if (val != CNTR_MAX)
11556 val = (upper << 32) | val;
11557
11558 } else {
11559 /* If we rolled over, we are saturated */
11560 if ((val < sval) || (val > CNTR_MAX))
11561 val = CNTR_MAX;
11562 }
11563 }
11564
11565 *psval = val;
11566
11567 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11568
11569 return val;
11570}
11571
11572static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11573 struct cntr_entry *entry,
11574 u64 *psval, void *context, int vl, u64 data)
11575{
11576 u64 val;
11577
11578 if (entry->flags & CNTR_DISABLED) {
11579 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11580 return 0;
11581 }
11582
11583 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11584
11585 if (entry->flags & CNTR_SYNTH) {
11586 *psval = data;
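/*
 * The software shadow keeps the full 64-bit value; only the low
 * 32 bits can be pushed into a 32-bit hardware counter below.
 */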
11587 if (entry->flags & CNTR_32BIT) {
11588 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11589 (data << 32) >> 32);
11590 val = data; /* return the full 64bit value */
11591 } else {
11592 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11593 data);
11594 }
11595 } else {
11596 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11597 }
11598
11599 *psval = val;
11600
11601 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11602
11603 return val;
11604}
11605
11606u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11607{
11608 struct cntr_entry *entry;
11609 u64 *sval;
11610
11611 entry = &dev_cntrs[index];
11612 sval = dd->scntrs + entry->offset;
11613
11614 if (vl != CNTR_INVALID_VL)
11615 sval += vl;
11616
11617 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11618}
11619
11620u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11621{
11622 struct cntr_entry *entry;
11623 u64 *sval;
11624
11625 entry = &dev_cntrs[index];
11626 sval = dd->scntrs + entry->offset;
11627
11628 if (vl != CNTR_INVALID_VL)
11629 sval += vl;
11630
11631 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11632}
11633
11634u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11635{
11636 struct cntr_entry *entry;
11637 u64 *sval;
11638
11639 entry = &port_cntrs[index];
11640 sval = ppd->scntrs + entry->offset;
11641
11642 if (vl != CNTR_INVALID_VL)
11643 sval += vl;
11644
11645 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11646 (index <= C_RCV_HDR_OVF_LAST)) {
11647 /* We do not want to bother for disabled contexts */
11648 return 0;
11649 }
11650
11651 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11652}
11653
11654u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11655{
11656 struct cntr_entry *entry;
11657 u64 *sval;
11658
11659 entry = &port_cntrs[index];
11660 sval = ppd->scntrs + entry->offset;
11661
11662 if (vl != CNTR_INVALID_VL)
11663 sval += vl;
11664
11665 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11666 (index <= C_RCV_HDR_OVF_LAST)) {
11667 /* We do not want to bother for disabled contexts */
11668 return 0;
11669 }
11670
11671 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11672}
11673
11674static void update_synth_timer(unsigned long opaque)
11675{
11676 u64 cur_tx;
11677 u64 cur_rx;
11678 u64 total_flits;
11679 u8 update = 0;
11680 int i, j, vl;
11681 struct hfi1_pportdata *ppd;
11682 struct cntr_entry *entry;
11683
11684 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11685
11686 /*
11687 * Rather than keep beating on the CSRs, pick a minimal set that we can
11688 * check to watch for a potential rollover. We can do this by looking at
11689 * the number of flits sent/received. If the total flits exceed 32 bits,
11690 * then we have to iterate over all the counters and update them.
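 * (Assuming 64-bit flits at full line rate, a 32-bit flit count can wrap
 * within a few seconds, hence the periodic check.)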
11691 */
11692 entry = &dev_cntrs[C_DC_RCV_FLITS];
11693 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11694
11695 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11696 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11697
11698 hfi1_cdbg(
11699 CNTR,
11700 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11701 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11702
11703 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11704 /*
11705 * May not be strictly necessary to update but it won't hurt and
11706 * simplifies the logic here.
11707 */
11708 update = 1;
11709 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11710 dd->unit);
11711 } else {
11712 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11713 hfi1_cdbg(CNTR,
11714 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11715 total_flits, (u64)CNTR_32BIT_MAX);
11716 if (total_flits >= CNTR_32BIT_MAX) {
11717 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11718 dd->unit);
11719 update = 1;
11720 }
11721 }
11722
11723 if (update) {
11724 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11725 for (i = 0; i < DEV_CNTR_LAST; i++) {
11726 entry = &dev_cntrs[i];
11727 if (entry->flags & CNTR_VL) {
11728 for (vl = 0; vl < C_VL_COUNT; vl++)
11729 read_dev_cntr(dd, i, vl);
11730 } else {
11731 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11732 }
11733 }
11734 ppd = (struct hfi1_pportdata *)(dd + 1);
11735 for (i = 0; i < dd->num_pports; i++, ppd++) {
11736 for (j = 0; j < PORT_CNTR_LAST; j++) {
11737 entry = &port_cntrs[j];
11738 if (entry->flags & CNTR_VL) {
11739 for (vl = 0; vl < C_VL_COUNT; vl++)
11740 read_port_cntr(ppd, j, vl);
11741 } else {
11742 read_port_cntr(ppd, j, CNTR_INVALID_VL);
11743 }
11744 }
11745 }
11746
11747 /*
11748 * We want the value in the register. The goal is to keep track
11749 * of the number of "ticks" not the counter value. In other
11750 * words if the register rolls we want to notice it and go ahead
11751 * and force an update.
11752 */
11753 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11754 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11755 CNTR_MODE_R, 0);
11756
11757 entry = &dev_cntrs[C_DC_RCV_FLITS];
11758 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11759 CNTR_MODE_R, 0);
11760
11761 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11762 dd->unit, dd->last_tx, dd->last_rx);
11763
11764 } else {
11765 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11766 }
11767
11768 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11769}
11770
11771#define C_MAX_NAME 13 /* 12 chars + one for '\0' */
11772static int init_cntrs(struct hfi1_devdata *dd)
11773{
Dean Luickc024c552016-01-11 18:30:57 -050011774 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011775 size_t sz;
11776 char *p;
11777 char name[C_MAX_NAME];
11778 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011779 const char *bit_type_32 = ",32";
11780 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011781
11782 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053011783 setup_timer(&dd->synth_stats_timer, update_synth_timer,
11784 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011785
11786 /***********************/
11787 /* per device counters */
11788 /***********************/
11789
11790 /* size names and determine how many we have */
11791 dd->ndevcntrs = 0;
11792 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011793
11794 for (i = 0; i < DEV_CNTR_LAST; i++) {
11795 hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
11796 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11797 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11798 continue;
11799 }
11800
11801 if (dev_cntrs[i].flags & CNTR_VL) {
11802 hfi1_dbg_early("\tProcessing VL cntr\n");
Dean Luickc024c552016-01-11 18:30:57 -050011803 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011804 for (j = 0; j < C_VL_COUNT; j++) {
11805 memset(name, '\0', C_MAX_NAME);
11806 snprintf(name, C_MAX_NAME, "%s%d",
11807 dev_cntrs[i].name,
11808 vl_from_idx(j));
11809 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011810 /* Add ",32" for 32-bit counters */
11811 if (dev_cntrs[i].flags & CNTR_32BIT)
11812 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011813 sz++;
11814 hfi1_dbg_early("\t\t%s\n", name);
11815 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011816 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011817 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11818 hfi1_dbg_early(
11819 "\tProcessing per SDE counters chip enginers %u\n",
11820 dd->chip_sdma_engines);
Dean Luickc024c552016-01-11 18:30:57 -050011821 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011822 for (j = 0; j < dd->chip_sdma_engines; j++) {
11823 memset(name, '\0', C_MAX_NAME);
11824 snprintf(name, C_MAX_NAME, "%s%d",
11825 dev_cntrs[i].name, j);
11826 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011827 /* Add ",32" for 32-bit counters */
11828 if (dev_cntrs[i].flags & CNTR_32BIT)
11829 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011830 sz++;
11831 hfi1_dbg_early("\t\t%s\n", name);
11832 dd->ndevcntrs++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011833 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011834 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011835 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011836 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011837 /* Add ",32" for 32-bit counters */
11838 if (dev_cntrs[i].flags & CNTR_32BIT)
11839 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050011840 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011841 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011842 hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
11843 }
11844 }
11845
11846 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050011847 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011848 if (!dd->cntrs)
11849 goto bail;
11850
Dean Luickc024c552016-01-11 18:30:57 -050011851 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011852 if (!dd->scntrs)
11853 goto bail;
11854
11855
11856 /* allocate space for the counter names */
11857 dd->cntrnameslen = sz;
11858 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11859 if (!dd->cntrnames)
11860 goto bail;
11861
11862 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050011863 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011864 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11865 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011866 } else if (dev_cntrs[i].flags & CNTR_VL) {
11867 for (j = 0; j < C_VL_COUNT; j++) {
11868 memset(name, '\0', C_MAX_NAME);
11869 snprintf(name, C_MAX_NAME, "%s%d",
11870 dev_cntrs[i].name,
11871 vl_from_idx(j));
11872 memcpy(p, name, strlen(name));
11873 p += strlen(name);
11874
11875 /* Counter is 32 bits */
11876 if (dev_cntrs[i].flags & CNTR_32BIT) {
11877 memcpy(p, bit_type_32, bit_type_32_sz);
11878 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011879 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011880
Mike Marciniszyn77241052015-07-30 15:17:43 -040011881 *p++ = '\n';
11882 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011883 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11884 for (j = 0; j < dd->chip_sdma_engines; j++) {
11885 memset(name, '\0', C_MAX_NAME);
11886 snprintf(name, C_MAX_NAME, "%s%d",
11887 dev_cntrs[i].name, j);
11888 memcpy(p, name, strlen(name));
11889 p += strlen(name);
11890
11891 /* Counter is 32 bits */
11892 if (dev_cntrs[i].flags & CNTR_32BIT) {
11893 memcpy(p, bit_type_32, bit_type_32_sz);
11894 p += bit_type_32_sz;
11895 }
11896
11897 *p++ = '\n';
11898 }
11899 } else {
11900 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11901 p += strlen(dev_cntrs[i].name);
11902
11903 /* Counter is 32 bits */
11904 if (dev_cntrs[i].flags & CNTR_32BIT) {
11905 memcpy(p, bit_type_32, bit_type_32_sz);
11906 p += bit_type_32_sz;
11907 }
11908
11909 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040011910 }
11911 }
11912
11913 /*********************/
11914 /* per port counters */
11915 /*********************/
11916
11917 /*
11918 * Go through the counters for the overflows and disable the ones we
11919 * don't need. This varies based on platform so we need to do it
11920 * dynamically here.
11921 */
11922 rcv_ctxts = dd->num_rcv_contexts;
11923 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11924 i <= C_RCV_HDR_OVF_LAST; i++) {
11925 port_cntrs[i].flags |= CNTR_DISABLED;
11926 }
11927
11928 /* size port counter names and determine how many we have */
11929 sz = 0;
11930 dd->nportcntrs = 0;
11931 for (i = 0; i < PORT_CNTR_LAST; i++) {
11932 hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
11933 if (port_cntrs[i].flags & CNTR_DISABLED) {
11934 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11935 continue;
11936 }
11937
11938 if (port_cntrs[i].flags & CNTR_VL) {
11939 hfi1_dbg_early("\tProcessing VL cntr\n");
11940 port_cntrs[i].offset = dd->nportcntrs;
11941 for (j = 0; j < C_VL_COUNT; j++) {
11942 memset(name, '\0', C_MAX_NAME);
11943 snprintf(name, C_MAX_NAME, "%s%d",
11944 port_cntrs[i].name,
11945 vl_from_idx(j));
11946 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011947 /* Add ",32" for 32-bit counters */
11948 if (port_cntrs[i].flags & CNTR_32BIT)
11949 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011950 sz++;
11951 hfi1_dbg_early("\t\t%s\n", name);
11952 dd->nportcntrs++;
11953 }
11954 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011955 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011956 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011957 /* Add ",32" for 32-bit counters */
11958 if (port_cntrs[i].flags & CNTR_32BIT)
11959 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011960 port_cntrs[i].offset = dd->nportcntrs;
11961 dd->nportcntrs++;
11962 hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
11963 }
11964 }
11965
11966 /* allocate space for the counter names */
11967 dd->portcntrnameslen = sz;
11968 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11969 if (!dd->portcntrnames)
11970 goto bail;
11971
11972 /* fill in port cntr names */
11973 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11974 if (port_cntrs[i].flags & CNTR_DISABLED)
11975 continue;
11976
11977 if (port_cntrs[i].flags & CNTR_VL) {
11978 for (j = 0; j < C_VL_COUNT; j++) {
11979 memset(name, '\0', C_MAX_NAME);
11980 snprintf(name, C_MAX_NAME, "%s%d",
11981 port_cntrs[i].name,
11982 vl_from_idx(j));
11983 memcpy(p, name, strlen(name));
11984 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011985
11986 /* Counter is 32 bits */
11987 if (port_cntrs[i].flags & CNTR_32BIT) {
11988 memcpy(p, bit_type_32, bit_type_32_sz);
11989 p += bit_type_32_sz;
11990 }
11991
Mike Marciniszyn77241052015-07-30 15:17:43 -040011992 *p++ = '\n';
11993 }
11994 } else {
11995 memcpy(p, port_cntrs[i].name,
11996 strlen(port_cntrs[i].name));
11997 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080011998
11999 /* Counter is 32 bits */
12000 if (port_cntrs[i].flags & CNTR_32BIT) {
12001 memcpy(p, bit_type_32, bit_type_32_sz);
12002 p += bit_type_32_sz;
12003 }
12004
Mike Marciniszyn77241052015-07-30 15:17:43 -040012005 *p++ = '\n';
12006 }
12007 }
12008
12009 /* allocate per port storage for counter values */
12010 ppd = (struct hfi1_pportdata *)(dd + 1);
12011 for (i = 0; i < dd->num_pports; i++, ppd++) {
12012 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12013 if (!ppd->cntrs)
12014 goto bail;
12015
12016 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12017 if (!ppd->scntrs)
12018 goto bail;
12019 }
12020
12021 /* CPU counters need to be allocated and zeroed */
12022 if (init_cpu_counters(dd))
12023 goto bail;
12024
12025 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12026 return 0;
12027bail:
12028 free_cntrs(dd);
12029 return -ENOMEM;
12030}
12031
12032
12033static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12034{
12035 switch (chip_lstate) {
12036 default:
12037 dd_dev_err(dd,
12038 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12039 chip_lstate);
12040 /* fall through */
12041 case LSTATE_DOWN:
12042 return IB_PORT_DOWN;
12043 case LSTATE_INIT:
12044 return IB_PORT_INIT;
12045 case LSTATE_ARMED:
12046 return IB_PORT_ARMED;
12047 case LSTATE_ACTIVE:
12048 return IB_PORT_ACTIVE;
12049 }
12050}
12051
12052u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12053{
12054 /* look at the HFI meta-states only */
12055 switch (chip_pstate & 0xf0) {
12056 default:
12057 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12058 chip_pstate);
12059 /* fall through */
12060 case PLS_DISABLED:
12061 return IB_PORTPHYSSTATE_DISABLED;
12062 case PLS_OFFLINE:
12063 return OPA_PORTPHYSSTATE_OFFLINE;
12064 case PLS_POLLING:
12065 return IB_PORTPHYSSTATE_POLLING;
12066 case PLS_CONFIGPHY:
12067 return IB_PORTPHYSSTATE_TRAINING;
12068 case PLS_LINKUP:
12069 return IB_PORTPHYSSTATE_LINKUP;
12070 case PLS_PHYTEST:
12071 return IB_PORTPHYSSTATE_PHY_TEST;
12072 }
12073}
12074
12075/* return the OPA port logical state name */
12076const char *opa_lstate_name(u32 lstate)
12077{
12078 static const char * const port_logical_names[] = {
12079 "PORT_NOP",
12080 "PORT_DOWN",
12081 "PORT_INIT",
12082 "PORT_ARMED",
12083 "PORT_ACTIVE",
12084 "PORT_ACTIVE_DEFER",
12085 };
12086 if (lstate < ARRAY_SIZE(port_logical_names))
12087 return port_logical_names[lstate];
12088 return "unknown";
12089}
12090
12091/* return the OPA port physical state name */
12092const char *opa_pstate_name(u32 pstate)
12093{
12094 static const char * const port_physical_names[] = {
12095 "PHYS_NOP",
12096 "reserved1",
12097 "PHYS_POLL",
12098 "PHYS_DISABLED",
12099 "PHYS_TRAINING",
12100 "PHYS_LINKUP",
12101 "PHYS_LINK_ERR_RECOVER",
12102 "PHYS_PHY_TEST",
12103 "reserved8",
12104 "PHYS_OFFLINE",
12105 "PHYS_GANGED",
12106 "PHYS_TEST",
12107 };
12108 if (pstate < ARRAY_SIZE(port_physical_names))
12109 return port_physical_names[pstate];
12110 return "unknown";
12111}
12112
12113/*
12114 * Read the hardware link state and set the driver's cached value of it.
12115 * Return the (new) current value.
12116 */
12117u32 get_logical_state(struct hfi1_pportdata *ppd)
12118{
12119 u32 new_state;
12120
12121 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12122 if (new_state != ppd->lstate) {
12123 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12124 opa_lstate_name(new_state), new_state);
12125 ppd->lstate = new_state;
12126 }
12127 /*
12128 * Set port status flags in the page mapped into userspace
12129 * memory. Do it here to ensure a reliable state - this is
12130 * the only function called by all state handling code.
12131 * Always set the flags because the cached value
12132 * might have been changed explicitly outside of this
12133 * function.
12134 */
12135 if (ppd->statusp) {
12136 switch (ppd->lstate) {
12137 case IB_PORT_DOWN:
12138 case IB_PORT_INIT:
12139 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12140 HFI1_STATUS_IB_READY);
12141 break;
12142 case IB_PORT_ARMED:
12143 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12144 break;
12145 case IB_PORT_ACTIVE:
12146 *ppd->statusp |= HFI1_STATUS_IB_READY;
12147 break;
12148 }
12149 }
12150 return ppd->lstate;
12151}
12152
12153/**
12154 * wait_logical_linkstate - wait for an IB link state change to occur
12155 * @ppd: port device
12156 * @state: the state to wait for
12157 * @msecs: the number of milliseconds to wait
12158 *
12159 * Wait up to msecs milliseconds for IB link state change to occur.
12160 * For now, take the easy polling route.
12161 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12162 */
12163static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12164 int msecs)
12165{
12166 unsigned long timeout;
12167
12168 timeout = jiffies + msecs_to_jiffies(msecs);
12169 while (1) {
12170 if (get_logical_state(ppd) == state)
12171 return 0;
12172 if (time_after(jiffies, timeout))
12173 break;
12174 msleep(20);
12175 }
12176 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12177
12178 return -ETIMEDOUT;
12179}
12180
12181u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12182{
12183 static u32 remembered_state = 0xff;
12184 u32 pstate;
12185 u32 ib_pstate;
12186
12187 pstate = read_physical_state(ppd->dd);
12188 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12189 if (remembered_state != ib_pstate) {
12190 dd_dev_info(ppd->dd,
12191 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12192 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12193 pstate);
12194 remembered_state = ib_pstate;
12195 }
12196 return ib_pstate;
12197}
12198
12199/*
12200 * Read/modify/write ASIC_QSFP register bits as selected by mask
12201 * data: 0 or 1 in the positions depending on what needs to be written
12202 * dir: 0 for read, 1 for write
12203 * mask: select by setting
12204 * I2CCLK (bit 0)
12205 * I2CDATA (bit 1)
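 *
 * Returns the contents of the selected QSFP input register
 * (ASIC_QSFP1_IN or ASIC_QSFP2_IN); a call with mask == 0 skips the
 * output-enable update and simply reads back the input pins.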
12206 */
12207u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12208 u32 mask)
12209{
12210 u64 qsfp_oe, target_oe;
12211
12212 target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12213 if (mask) {
12214 /* We are writing register bits, so lock access */
12215 dir &= mask;
12216 data &= mask;
12217
12218 qsfp_oe = read_csr(dd, target_oe);
12219 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12220 write_csr(dd, target_oe, qsfp_oe);
12221 }
12222 /* We are exclusively reading bits here, but it is unlikely
12223 * we'll get valid data when we also set the direction of the pin
12224 * in the same call, so a reader should call this function again
12225 * to get valid data.
12226 */
12227 return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12228}
12229
12230#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12231(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12232
12233#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12234(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12235
12236int hfi1_init_ctxt(struct send_context *sc)
12237{
12238 if (sc != NULL) {
12239 struct hfi1_devdata *dd = sc->dd;
12240 u64 reg;
12241 u8 set = (sc->type == SC_USER ?
12242 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12243 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12244 reg = read_kctxt_csr(dd, sc->hw_context,
12245 SEND_CTXT_CHECK_ENABLE);
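/*
 * The CSR bit is a "disallow" bit, so enabling static rate
 * control means clearing it, and vice versa.
 */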
12246 if (set)
12247 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12248 else
12249 SET_STATIC_RATE_CONTROL_SMASK(reg);
12250 write_kctxt_csr(dd, sc->hw_context,
12251 SEND_CTXT_CHECK_ENABLE, reg);
12252 }
12253 return 0;
12254}
12255
12256int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12257{
12258 int ret = 0;
12259 u64 reg;
12260
12261 if (dd->icode != ICODE_RTL_SILICON) {
12262 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12263 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12264 __func__);
12265 return -EINVAL;
12266 }
12267 reg = read_csr(dd, ASIC_STS_THERM);
12268 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12269 ASIC_STS_THERM_CURR_TEMP_MASK);
12270 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12271 ASIC_STS_THERM_LO_TEMP_MASK);
12272 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12273 ASIC_STS_THERM_HI_TEMP_MASK);
12274 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12275 ASIC_STS_THERM_CRIT_TEMP_MASK);
12276 /* triggers is a 3-bit value - 1 bit per trigger. */
12277 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12278
12279 return ret;
12280}
12281
12282/* ========================================================================= */
12283
12284/*
12285 * Enable/disable chip from delivering interrupts.
12286 */
12287void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12288{
12289 int i;
12290
12291 /*
12292 * In HFI, the mask needs to be 1 to allow interrupts.
12293 */
12294 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012295 /* enable all interrupts */
12296 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12297 write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
12298
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012299 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012300 } else {
12301 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12302 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
12303 }
12304}
12305
12306/*
12307 * Clear all interrupt sources on the chip.
12308 */
12309static void clear_all_interrupts(struct hfi1_devdata *dd)
12310{
12311 int i;
12312
12313 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12314 write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
12315
12316 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12317 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12318 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12319 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12320 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12321 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12322 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12323 for (i = 0; i < dd->chip_send_contexts; i++)
12324 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12325 for (i = 0; i < dd->chip_sdma_engines; i++)
12326 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12327
12328 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12329 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12330 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12331}
12332
12333/* Move to pcie.c? */
12334static void disable_intx(struct pci_dev *pdev)
12335{
12336 pci_intx(pdev, 0);
12337}
12338
12339static void clean_up_interrupts(struct hfi1_devdata *dd)
12340{
12341 int i;
12342
12343 /* remove irqs - must happen before disabling/turning off */
12344 if (dd->num_msix_entries) {
12345 /* MSI-X */
12346 struct hfi1_msix_entry *me = dd->msix_entries;
12347
12348 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12349 if (me->arg == NULL) /* => no irq, no affinity */
12350 break;
12351 irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
12352 NULL);
12353 free_irq(me->msix.vector, me->arg);
12354 }
12355 } else {
12356 /* INTx */
12357 if (dd->requested_intx_irq) {
12358 free_irq(dd->pcidev->irq, dd);
12359 dd->requested_intx_irq = 0;
12360 }
12361 }
12362
12363 /* turn off interrupts */
12364 if (dd->num_msix_entries) {
12365 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012366 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012367 } else {
12368 /* INTx */
12369 disable_intx(dd->pcidev);
12370 }
12371
12372 /* clean structures */
12373 for (i = 0; i < dd->num_msix_entries; i++)
12374 free_cpumask_var(dd->msix_entries[i].mask);
12375 kfree(dd->msix_entries);
12376 dd->msix_entries = NULL;
12377 dd->num_msix_entries = 0;
12378}
12379
12380/*
12381 * Remap the interrupt source from the general handler to the given MSI-X
12382 * interrupt.
12383 */
12384static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12385{
12386 u64 reg;
12387 int m, n;
12388
12389 /* clear from the handled mask of the general interrupt */
12390 m = isrc / 64;
12391 n = isrc % 64;
12392 dd->gi_mask[m] &= ~((u64)1 << n);
12393
12394 /* direct the chip source to the given MSI-X interrupt */
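/*
 * Each 64-bit CCE_INT_MAP CSR holds eight 8-bit entries, one per
 * interrupt source; the entry value is the MSI-X vector to use.
 */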
12395 m = isrc / 8;
12396 n = isrc % 8;
12397 reg = read_csr(dd, CCE_INT_MAP + (8*m));
12398 reg &= ~((u64)0xff << (8*n));
12399 reg |= ((u64)msix_intr & 0xff) << (8*n);
12400 write_csr(dd, CCE_INT_MAP + (8*m), reg);
12401}
12402
12403static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12404 int engine, int msix_intr)
12405{
12406 /*
12407 * SDMA engine interrupt sources grouped by type, rather than
12408 * engine. Per-engine interrupts are as follows:
12409 * SDMA
12410 * SDMAProgress
12411 * SDMAIdle
12412 */
12413 remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
12414 msix_intr);
12415 remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
12416 msix_intr);
12417 remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
12418 msix_intr);
12419}
12420
Mike Marciniszyn77241052015-07-30 15:17:43 -040012421static int request_intx_irq(struct hfi1_devdata *dd)
12422{
12423 int ret;
12424
Jubin John98050712015-11-16 21:59:27 -050012425 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12426 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012427 ret = request_irq(dd->pcidev->irq, general_interrupt,
12428 IRQF_SHARED, dd->intx_name, dd);
12429 if (ret)
12430 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12431 ret);
12432 else
12433 dd->requested_intx_irq = 1;
12434 return ret;
12435}
12436
12437static int request_msix_irqs(struct hfi1_devdata *dd)
12438{
12439 const struct cpumask *local_mask;
12440 cpumask_var_t def, rcv;
12441 bool def_ret, rcv_ret;
12442 int first_general, last_general;
12443 int first_sdma, last_sdma;
12444 int first_rx, last_rx;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012445 int first_cpu, curr_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012446 int rcv_cpu, sdma_cpu;
12447 int i, ret = 0, possible;
12448 int ht;
12449
12450 /* calculate the ranges we are going to use */
12451 first_general = 0;
12452 first_sdma = last_general = first_general + 1;
12453 first_rx = last_sdma = first_sdma + dd->num_sdma;
12454 last_rx = first_rx + dd->n_krcv_queues;
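/*
 * i.e. vector 0 is the general interrupt, the next num_sdma vectors
 * are the SDMA engines, and the remaining vectors are the kernel
 * receive contexts.
 */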
12455
12456 /*
12457 * Interrupt affinity.
12458 *
12459 * Non-receive-available interrupts get a default mask that
12460 * starts as the possible CPUs, with hyperthread siblings and
12461 * each receive-available CPU removed.
12462 *
12463 * Receive-available interrupts get CPUs starting at node-relative
12464 * CPU 1, wrapping back to node-relative CPU 1 as necessary.
12465 *
12466 */
12467 local_mask = cpumask_of_pcibus(dd->pcidev->bus);
12468 /* if first cpu is invalid, use NUMA 0 */
12469 if (cpumask_first(local_mask) >= nr_cpu_ids)
12470 local_mask = topology_core_cpumask(0);
12471
12472 def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
12473 rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
12474 if (!def_ret || !rcv_ret)
12475 goto bail;
12476 /* use local mask as default */
12477 cpumask_copy(def, local_mask);
12478 possible = cpumask_weight(def);
12479 /* disarm threads from default */
12480 ht = cpumask_weight(
12481 topology_sibling_cpumask(cpumask_first(local_mask)));
12482 for (i = possible/ht; i < possible; i++)
12483 cpumask_clear_cpu(i, def);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012484 /* def now has full cores on chosen node */
12485 first_cpu = cpumask_first(def);
12486 if (nr_cpu_ids >= first_cpu)
12487 first_cpu++;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012488 curr_cpu = first_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012489
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012490 /* One context is reserved as control context */
12491 for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012492 cpumask_clear_cpu(curr_cpu, def);
12493 cpumask_set_cpu(curr_cpu, rcv);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012494 curr_cpu = cpumask_next(curr_cpu, def);
12495 if (curr_cpu >= nr_cpu_ids)
12496 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012497 }
12498 /* def mask has non-rcv, rcv has recv mask */
12499 rcv_cpu = cpumask_first(rcv);
12500 sdma_cpu = cpumask_first(def);
12501
12502 /*
12503 * Sanity check - the code expects all SDMA chip source
12504 * interrupts to be in the same CSR, starting at bit 0. Verify
12505 * that this is true by checking the bit location of the start.
12506 */
12507 BUILD_BUG_ON(IS_SDMA_START % 64);
12508
12509 for (i = 0; i < dd->num_msix_entries; i++) {
12510 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12511 const char *err_info;
12512 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012513 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012514 void *arg;
12515 int idx;
12516 struct hfi1_ctxtdata *rcd = NULL;
12517 struct sdma_engine *sde = NULL;
12518
12519 /* obtain the arguments to request_irq */
12520 if (first_general <= i && i < last_general) {
12521 idx = i - first_general;
12522 handler = general_interrupt;
12523 arg = dd;
12524 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012525 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012526 err_info = "general";
12527 } else if (first_sdma <= i && i < last_sdma) {
12528 idx = i - first_sdma;
12529 sde = &dd->per_sdma[idx];
12530 handler = sdma_interrupt;
12531 arg = sde;
12532 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012533 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012534 err_info = "sdma";
12535 remap_sdma_interrupts(dd, idx, i);
12536 } else if (first_rx <= i && i < last_rx) {
12537 idx = i - first_rx;
12538 rcd = dd->rcd[idx];
12539 /* no interrupt if no rcd */
12540 if (!rcd)
12541 continue;
12542 /*
12543 * Set the interrupt register and mask for this
12544 * context's interrupt.
12545 */
12546 rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
12547 rcd->imask = ((u64)1) <<
12548 ((IS_RCVAVAIL_START+idx) % 64);
12549 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012550 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012551 arg = rcd;
12552 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012553 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012554 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012555 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012556 } else {
12557 /* not in our expected range - complain, then
12558 ignore it */
12559 dd_dev_err(dd,
12560 "Unexpected extra MSI-X interrupt %d\n", i);
12561 continue;
12562 }
12563 /* no argument, no interrupt */
12564 if (arg == NULL)
12565 continue;
12566 /* make sure the name is terminated */
12567 me->name[sizeof(me->name)-1] = 0;
12568
Dean Luickf4f30031c2015-10-26 10:28:44 -040012569 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12570 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012571 if (ret) {
12572 dd_dev_err(dd,
12573 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12574 err_info, me->msix.vector, idx, ret);
12575 return ret;
12576 }
12577 /*
12578 * assign arg after request_irq call, so it will be
12579 * cleaned up
12580 */
12581 me->arg = arg;
12582
12583 if (!zalloc_cpumask_var(
12584 &dd->msix_entries[i].mask,
12585 GFP_KERNEL))
12586 goto bail;
12587 if (handler == sdma_interrupt) {
12588 dd_dev_info(dd, "sdma engine %d cpu %d\n",
12589 sde->this_idx, sdma_cpu);
Mike Marciniszyn0a226ed2015-11-09 19:13:58 -050012590 sde->cpu = sdma_cpu;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012591 cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
12592 sdma_cpu = cpumask_next(sdma_cpu, def);
12593 if (sdma_cpu >= nr_cpu_ids)
12594 sdma_cpu = cpumask_first(def);
12595 } else if (handler == receive_context_interrupt) {
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012596 dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
12597 (rcd->ctxt == HFI1_CTRL_CTXT) ?
12598 cpumask_first(def) : rcv_cpu);
12599 if (rcd->ctxt == HFI1_CTRL_CTXT) {
12600 /* map to first default */
12601 cpumask_set_cpu(cpumask_first(def),
12602 dd->msix_entries[i].mask);
12603 } else {
12604 cpumask_set_cpu(rcv_cpu,
12605 dd->msix_entries[i].mask);
12606 rcv_cpu = cpumask_next(rcv_cpu, rcv);
12607 if (rcv_cpu >= nr_cpu_ids)
12608 rcv_cpu = cpumask_first(rcv);
12609 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012610 } else {
12611 /* otherwise first def */
12612 dd_dev_info(dd, "%s cpu %d\n",
12613 err_info, cpumask_first(def));
12614 cpumask_set_cpu(
12615 cpumask_first(def), dd->msix_entries[i].mask);
12616 }
12617 irq_set_affinity_hint(
12618 dd->msix_entries[i].msix.vector,
12619 dd->msix_entries[i].mask);
12620 }
12621
12622out:
12623 free_cpumask_var(def);
12624 free_cpumask_var(rcv);
12625 return ret;
12626bail:
12627 ret = -ENOMEM;
12628 goto out;
12629}
12630
12631/*
12632 * Set the general handler to accept all interrupts, remap all
12633 * chip interrupts back to MSI-X 0.
12634 */
12635static void reset_interrupts(struct hfi1_devdata *dd)
12636{
12637 int i;
12638
12639 /* all interrupts handled by the general handler */
12640 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12641 dd->gi_mask[i] = ~(u64)0;
12642
12643 /* all chip interrupts map to MSI-X 0 */
12644 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12645 write_csr(dd, CCE_INT_MAP + (8*i), 0);
12646}
12647
12648static int set_up_interrupts(struct hfi1_devdata *dd)
12649{
12650 struct hfi1_msix_entry *entries;
12651 u32 total, request;
12652 int i, ret;
12653 int single_interrupt = 0; /* we expect to have all the interrupts */
12654
12655 /*
12656 * Interrupt count:
12657 * 1 general, "slow path" interrupt (includes the SDMA engines
12658 * slow source, SDMACleanupDone)
12659 * N interrupts - one per used SDMA engine
12660 * M interrupts - one per kernel receive context
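 *
 * e.g. 16 SDMA engines and 8 kernel receive contexts would ask for
 * 1 + 16 + 8 = 25 MSI-X vectors.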
12661 */
12662 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12663
12664 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12665 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012666 ret = -ENOMEM;
12667 goto fail;
12668 }
12669 /* 1-1 MSI-X entry assignment */
12670 for (i = 0; i < total; i++)
12671 entries[i].msix.entry = i;
12672
12673 /* ask for MSI-X interrupts */
12674 request = total;
12675 request_msix(dd, &request, entries);
12676
12677 if (request == 0) {
12678 /* using INTx */
12679 /* dd->num_msix_entries already zero */
12680 kfree(entries);
12681 single_interrupt = 1;
12682 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12683 } else {
12684 /* using MSI-X */
12685 dd->num_msix_entries = request;
12686 dd->msix_entries = entries;
12687
12688 if (request != total) {
12689 /* using MSI-X, with reduced interrupts */
12690 dd_dev_err(
12691 dd,
12692 "cannot handle reduced interrupt case, want %u, got %u\n",
12693 total, request);
12694 ret = -EINVAL;
12695 goto fail;
12696 }
12697 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12698 }
12699
12700 /* mask all interrupts */
12701 set_intr_state(dd, 0);
12702 /* clear all pending interrupts */
12703 clear_all_interrupts(dd);
12704
12705 /* reset general handler mask, chip MSI-X mappings */
12706 reset_interrupts(dd);
12707
12708 if (single_interrupt)
12709 ret = request_intx_irq(dd);
12710 else
12711 ret = request_msix_irqs(dd);
12712 if (ret)
12713 goto fail;
12714
12715 return 0;
12716
12717fail:
12718 clean_up_interrupts(dd);
12719 return ret;
12720}
12721
12722/*
12723 * Set up context values in dd. Sets:
12724 *
12725 * num_rcv_contexts - number of contexts being used
12726 * n_krcv_queues - number of kernel contexts
12727 * first_user_ctxt - first non-kernel context in array of contexts
12728 * freectxts - number of free user contexts
12729 * num_send_contexts - number of PIO send contexts being used
12730 */
12731static int set_up_context_variables(struct hfi1_devdata *dd)
12732{
12733 int num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012734 int total_contexts;
12735 int ret;
12736 unsigned ngroups;
12737
12738 /*
12739 * Kernel contexts (to be fixed later):
12740 * - min of 2, or 1 context per NUMA node
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012741 * - Context 0 - control context (VL15/multicast/error)
12742 * - Context 1 - default context
Mike Marciniszyn77241052015-07-30 15:17:43 -040012743 */
12744 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012745 /*
12746 * Don't count context 0 in n_krcvqs since
12747 * it isn't used for normal verbs traffic.
12748 *
12749 * krcvqs will reflect number of kernel
12750 * receive contexts above 0.
12751 */
12752 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012753 else
jubin.john@intel.com0edf80e2016-01-11 18:30:55 -050012754 num_kernel_contexts = num_online_nodes() + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012755 num_kernel_contexts =
12756 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12757 /*
12758 * Every kernel receive context needs an ACK send context.
12759 * One send context is allocated for each VL{0-7} and VL15.
12760 */
12761 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12762 dd_dev_err(dd,
12763 "Reducing # kernel rcv contexts to: %d, from %d\n",
12764 (int)(dd->chip_send_contexts - num_vls - 1),
12765 (int)num_kernel_contexts);
12766 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12767 }
12768 /*
12769 * User contexts: (to be fixed later)
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012770 * - default to 1 user context per CPU if num_user_contexts is
12771 * negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012772 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012773 if (num_user_contexts < 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012774 num_user_contexts = num_online_cpus();
12775
12776 total_contexts = num_kernel_contexts + num_user_contexts;
12777
12778 /*
12779 * Adjust the counts given a global max.
12780 */
12781 if (total_contexts > dd->chip_rcv_contexts) {
12782 dd_dev_err(dd,
12783 "Reducing # user receive contexts to: %d, from %d\n",
12784 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12785 (int)num_user_contexts);
12786 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12787 /* recalculate */
12788 total_contexts = num_kernel_contexts + num_user_contexts;
12789 }
12790
12791 /* the first N are kernel contexts, the rest are user contexts */
12792 dd->num_rcv_contexts = total_contexts;
12793 dd->n_krcv_queues = num_kernel_contexts;
12794 dd->first_user_ctxt = num_kernel_contexts;
12795 dd->freectxts = num_user_contexts;
12796 dd_dev_info(dd,
12797 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12798 (int)dd->chip_rcv_contexts,
12799 (int)dd->num_rcv_contexts,
12800 (int)dd->n_krcv_queues,
12801 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12802
12803 /*
12804 * Receive array allocation:
12805 * All RcvArray entries are divided into groups of 8. This
12806 * is required by the hardware and will speed up writes to
12807 * consecutive entries by using write-combining of the entire
12808 * cacheline.
12809 *
12810 * The number of groups is evenly divided among all contexts.
12811 * Any left-over groups are given to the first N user
12812 * contexts.
12813 */
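/*
 * For example, 2048 groups spread over 20 contexts gives each
 * context 102 groups, with the remaining 8 (nctxt_extra) handed to
 * the first user contexts.
 */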
12814 dd->rcv_entries.group_size = RCV_INCREMENT;
12815 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12816 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12817 dd->rcv_entries.nctxt_extra = ngroups -
12818 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12819 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12820 dd->rcv_entries.ngroups,
12821 dd->rcv_entries.nctxt_extra);
12822 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12823 MAX_EAGER_ENTRIES * 2) {
12824 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12825 dd->rcv_entries.group_size;
12826 dd_dev_info(dd,
12827 "RcvArray group count too high, change to %u\n",
12828 dd->rcv_entries.ngroups);
12829 dd->rcv_entries.nctxt_extra = 0;
12830 }
12831 /*
12832 * PIO send contexts
12833 */
12834 ret = init_sc_pools_and_sizes(dd);
12835 if (ret >= 0) { /* success */
12836 dd->num_send_contexts = ret;
12837 dd_dev_info(
12838 dd,
12839 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12840 dd->chip_send_contexts,
12841 dd->num_send_contexts,
12842 dd->sc_sizes[SC_KERNEL].count,
12843 dd->sc_sizes[SC_ACK].count,
12844 dd->sc_sizes[SC_USER].count);
12845 ret = 0; /* success */
12846 }
12847
12848 return ret;
12849}
12850
12851/*
12852 * Set the device/port partition key table. The MAD code
12853 * will ensure that, at least, the partial management
12854 * partition key is present in the table.
12855 */
12856static void set_partition_keys(struct hfi1_pportdata *ppd)
12857{
12858 struct hfi1_devdata *dd = ppd->dd;
12859 u64 reg = 0;
12860 int i;
12861
12862 dd_dev_info(dd, "Setting partition keys\n");
12863 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12864 reg |= (ppd->pkeys[i] &
12865 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12866 ((i % 4) *
12867 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12868 /* Each register holds 4 PKey values. */
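/* Each PKey is 16 bits, so the CSR is flushed on every fourth index. */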
12869 if ((i % 4) == 3) {
12870 write_csr(dd, RCV_PARTITION_KEY +
12871 ((i - 3) * 2), reg);
12872 reg = 0;
12873 }
12874 }
12875
12876 /* Always enable HW pkeys check when pkeys table is set */
12877 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12878}
12879
12880/*
12881 * These CSRs and memories are uninitialized on reset and must be
12882 * written before reading to set the ECC/parity bits.
12883 *
12884 * NOTE: All user context CSRs that are not mmaped write-only
12885 * (e.g. the TID flows) must be initialized even if the driver never
12886 * reads them.
12887 */
12888static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12889{
12890 int i, j;
12891
12892 /* CceIntMap */
12893 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12894 write_csr(dd, CCE_INT_MAP+(8*i), 0);
12895
12896 /* SendCtxtCreditReturnAddr */
12897 for (i = 0; i < dd->chip_send_contexts; i++)
12898 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12899
12900 /* PIO Send buffers */
12901 /* SDMA Send buffers */
12902 /* These are not normally read, and (presently) have no method
12903 to be read, so are not pre-initialized */
12904
12905 /* RcvHdrAddr */
12906 /* RcvHdrTailAddr */
12907 /* RcvTidFlowTable */
12908 for (i = 0; i < dd->chip_rcv_contexts; i++) {
12909 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12910 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12911 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12912 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
12913 }
12914
12915 /* RcvArray */
12916 for (i = 0; i < dd->chip_rcv_array_count; i++)
12917 write_csr(dd, RCV_ARRAY + (8*i),
12918 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12919
12920 /* RcvQPMapTable */
12921 for (i = 0; i < 32; i++)
12922 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12923}
12924
12925/*
12926 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12927 */
12928static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12929 u64 ctrl_bits)
12930{
12931 unsigned long timeout;
12932 u64 reg;
12933
12934 /* is the condition present? */
12935 reg = read_csr(dd, CCE_STATUS);
12936 if ((reg & status_bits) == 0)
12937 return;
12938
12939 /* clear the condition */
12940 write_csr(dd, CCE_CTRL, ctrl_bits);
12941
12942 /* wait for the condition to clear */
12943 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12944 while (1) {
12945 reg = read_csr(dd, CCE_STATUS);
12946 if ((reg & status_bits) == 0)
12947 return;
12948 if (time_after(jiffies, timeout)) {
12949 dd_dev_err(dd,
12950 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12951 status_bits, reg & status_bits);
12952 return;
12953 }
12954 udelay(1);
12955 }
12956}
12957
12958/* set CCE CSRs to chip reset defaults */
12959static void reset_cce_csrs(struct hfi1_devdata *dd)
12960{
12961 int i;
12962
12963 /* CCE_REVISION read-only */
12964 /* CCE_REVISION2 read-only */
12965 /* CCE_CTRL - bits clear automatically */
12966 /* CCE_STATUS read-only, use CceCtrl to clear */
12967 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12968 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12969 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12970 for (i = 0; i < CCE_NUM_SCRATCH; i++)
12971 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12972 /* CCE_ERR_STATUS read-only */
12973 write_csr(dd, CCE_ERR_MASK, 0);
12974 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12975 /* CCE_ERR_FORCE leave alone */
12976 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12977 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12978 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12979 /* CCE_PCIE_CTRL leave alone */
12980 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12981 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12982 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12983 CCE_MSIX_TABLE_UPPER_RESETCSR);
12984 }
12985 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12986 /* CCE_MSIX_PBA read-only */
12987 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12988 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12989 }
12990 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12991 write_csr(dd, CCE_INT_MAP, 0);
12992 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12993 /* CCE_INT_STATUS read-only */
12994 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12995 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12996 /* CCE_INT_FORCE leave alone */
12997 /* CCE_INT_BLOCKED read-only */
12998 }
12999 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13000 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13001}
13002
13003/* set ASIC CSRs to chip reset defaults */
13004static void reset_asic_csrs(struct hfi1_devdata *dd)
13005{
Mike Marciniszyn77241052015-07-30 15:17:43 -040013006 int i;
13007
13008 /*
13009 * If the HFIs are shared between separate nodes or VMs,
13010 * then more will need to be done here. One idea is a module
13011 * parameter that returns early, letting the first power-on or
13012 * a known first load do the reset and blocking all others.
13013 */
13014
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013015 if (!(dd->flags & HFI1_DO_INIT_ASIC))
13016 return;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013017
13018 if (dd->icode != ICODE_FPGA_EMULATION) {
13019 /* emulation does not have an SBus - leave these alone */
13020 /*
13021 * All writes to ASIC_CFG_SBUS_REQUEST do something.
13022 * Notes:
13023 * o The reset is not zero if aimed at the core. See the
13024 * SBus documentation for details.
13025 * o If the SBus firmware has been updated (e.g. by the BIOS),
13026 * will the reset revert that?
13027 */
13028 /* ASIC_CFG_SBUS_REQUEST leave alone */
13029 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
13030 }
13031 /* ASIC_SBUS_RESULT read-only */
13032 write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
13033 for (i = 0; i < ASIC_NUM_SCRATCH; i++)
13034 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
13035 write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013036
13037 /* We might want to retain this state across FLR if we ever use it */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013038 write_csr(dd, ASIC_CFG_DRV_STR, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013039
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050013040 /* ASIC_CFG_THERM_POLL_EN leave alone */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013041 /* ASIC_STS_THERM read-only */
13042 /* ASIC_CFG_RESET leave alone */
13043
13044 write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
13045 /* ASIC_PCIE_SD_HOST_STATUS read-only */
13046 write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
13047 write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
13048 /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
13049 write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
13050 /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
13051 /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
13052 for (i = 0; i < 16; i++)
13053 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
13054
13055 /* ASIC_GPIO_IN read-only */
13056 write_csr(dd, ASIC_GPIO_OE, 0);
13057 write_csr(dd, ASIC_GPIO_INVERT, 0);
13058 write_csr(dd, ASIC_GPIO_OUT, 0);
13059 write_csr(dd, ASIC_GPIO_MASK, 0);
13060 /* ASIC_GPIO_STATUS read-only */
13061 write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
13062 /* ASIC_GPIO_FORCE leave alone */
13063
13064 /* ASIC_QSFP1_IN read-only */
13065 write_csr(dd, ASIC_QSFP1_OE, 0);
13066 write_csr(dd, ASIC_QSFP1_INVERT, 0);
13067 write_csr(dd, ASIC_QSFP1_OUT, 0);
13068 write_csr(dd, ASIC_QSFP1_MASK, 0);
13069 /* ASIC_QSFP1_STATUS read-only */
13070 write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13071 /* ASIC_QSFP1_FORCE leave alone */
13072
13073 /* ASIC_QSFP2_IN read-only */
13074 write_csr(dd, ASIC_QSFP2_OE, 0);
13075 write_csr(dd, ASIC_QSFP2_INVERT, 0);
13076 write_csr(dd, ASIC_QSFP2_OUT, 0);
13077 write_csr(dd, ASIC_QSFP2_MASK, 0);
13078 /* ASIC_QSFP2_STATUS read-only */
13079 write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13080 /* ASIC_QSFP2_FORCE leave alone */
13081
13082 write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13083 /* this also writes a NOP command, clearing paging mode */
13084 write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13085 write_csr(dd, ASIC_EEP_DATA, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013086}
13087
13088/* set MISC CSRs to chip reset defaults */
13089static void reset_misc_csrs(struct hfi1_devdata *dd)
13090{
13091 int i;
13092
13093 for (i = 0; i < 32; i++) {
13094 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13095 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13096 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13097 }
13098 /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13099 only be written in 128-byte chunks */
13100 /* init RSA engine to clear lingering errors */
13101 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13102 write_csr(dd, MISC_CFG_RSA_MU, 0);
13103 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13104 /* MISC_STS_8051_DIGEST read-only */
13105 /* MISC_STS_SBM_DIGEST read-only */
13106 /* MISC_STS_PCIE_DIGEST read-only */
13107 /* MISC_STS_FAB_DIGEST read-only */
13108 /* MISC_ERR_STATUS read-only */
13109 write_csr(dd, MISC_ERR_MASK, 0);
13110 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13111 /* MISC_ERR_FORCE leave alone */
13112}
13113
13114/* set TXE CSRs to chip reset defaults */
13115static void reset_txe_csrs(struct hfi1_devdata *dd)
13116{
13117 int i;
13118
13119 /*
13120 * TXE Kernel CSRs
13121 */
13122 write_csr(dd, SEND_CTRL, 0);
13123 __cm_reset(dd, 0); /* reset CM internal state */
13124 /* SEND_CONTEXTS read-only */
13125 /* SEND_DMA_ENGINES read-only */
13126 /* SEND_PIO_MEM_SIZE read-only */
13127 /* SEND_DMA_MEM_SIZE read-only */
13128 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13129 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13130 /* SEND_PIO_ERR_STATUS read-only */
13131 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13132 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13133 /* SEND_PIO_ERR_FORCE leave alone */
13134 /* SEND_DMA_ERR_STATUS read-only */
13135 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13136 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13137 /* SEND_DMA_ERR_FORCE leave alone */
13138 /* SEND_EGRESS_ERR_STATUS read-only */
13139 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13140 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13141 /* SEND_EGRESS_ERR_FORCE leave alone */
13142 write_csr(dd, SEND_BTH_QP, 0);
13143 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13144 write_csr(dd, SEND_SC2VLT0, 0);
13145 write_csr(dd, SEND_SC2VLT1, 0);
13146 write_csr(dd, SEND_SC2VLT2, 0);
13147 write_csr(dd, SEND_SC2VLT3, 0);
13148 write_csr(dd, SEND_LEN_CHECK0, 0);
13149 write_csr(dd, SEND_LEN_CHECK1, 0);
13150 /* SEND_ERR_STATUS read-only */
13151 write_csr(dd, SEND_ERR_MASK, 0);
13152 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13153 /* SEND_ERR_FORCE read-only */
13154 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13155 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
13156 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13157 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
13158 for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
13159 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
13160 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13161 write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
13162 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13163 write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
13164 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13165 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
13166 SEND_CM_GLOBAL_CREDIT_RESETCSR);
13167 /* SEND_CM_CREDIT_USED_STATUS read-only */
13168 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13169 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13170 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13171 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13172 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13173 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13174 write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
13175 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13176 /* SEND_CM_CREDIT_USED_VL read-only */
13177 /* SEND_CM_CREDIT_USED_VL15 read-only */
13178 /* SEND_EGRESS_CTXT_STATUS read-only */
13179 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13180 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13181 /* SEND_EGRESS_ERR_INFO read-only */
13182 /* SEND_EGRESS_ERR_SOURCE read-only */
13183
13184 /*
13185 * TXE Per-Context CSRs
13186 */
13187 for (i = 0; i < dd->chip_send_contexts; i++) {
13188 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13189 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13190 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13191 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13192 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13193 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13194 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13195 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13196 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13197 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13198 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13199 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13200 }
13201
13202 /*
13203 * TXE Per-SDMA CSRs
13204 */
13205 for (i = 0; i < dd->chip_sdma_engines; i++) {
13206 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13207 /* SEND_DMA_STATUS read-only */
13208 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13209 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13210 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13211 /* SEND_DMA_HEAD read-only */
13212 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13213 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13214 /* SEND_DMA_IDLE_CNT read-only */
13215 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13216 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13217 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13218 /* SEND_DMA_ENG_ERR_STATUS read-only */
13219 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13220 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13221 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13222 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13223 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13224 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13225 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13226 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13227 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13228 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13229 }
13230}
13231
13232/*
13233 * Expect on entry:
13234 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13235 */
13236static void init_rbufs(struct hfi1_devdata *dd)
13237{
13238 u64 reg;
13239 int count;
13240
13241 /*
13242 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13243 * clear.
13244 */
13245 count = 0;
13246 while (1) {
13247 reg = read_csr(dd, RCV_STATUS);
13248 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13249 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13250 break;
13251 /*
13252 * Give up after 1ms - maximum wait time.
13253 *
13254 * RBuf size is 148KiB. Slowest possible is PCIe Gen1 x1 at
13255 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13256 * 148 KB / (66% * 250MB/s) = 920us
13257 */
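		/* 500 passes of udelay(2) below gives the ~1ms bound */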
13258 if (count++ > 500) {
13259 dd_dev_err(dd,
13260 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13261 __func__, reg);
13262 break;
13263 }
13264 udelay(2); /* do not busy-wait the CSR */
13265 }
13266
13267 /* start the init - expect RcvCtrl to be 0 */
13268 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13269
13270 /*
13271	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13272 * period after the write before RcvStatus.RxRbufInitDone is valid.
13273 * The delay in the first run through the loop below is sufficient and
13274	 * required before the first read of RcvStatus.RxRbufInitDone.
13275 */
13276 read_csr(dd, RCV_CTRL);
13277
13278 /* wait for the init to finish */
13279 count = 0;
13280 while (1) {
13281 /* delay is required first time through - see above */
13282 udelay(2); /* do not busy-wait the CSR */
13283 reg = read_csr(dd, RCV_STATUS);
13284 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13285 break;
13286
13287 /* give up after 100us - slowest possible at 33MHz is 73us */
13288 if (count++ > 50) {
13289 dd_dev_err(dd,
13290 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13291 __func__);
13292 break;
13293 }
13294 }
13295}
13296
13297/* set RXE CSRs to chip reset defaults */
13298static void reset_rxe_csrs(struct hfi1_devdata *dd)
13299{
13300 int i, j;
13301
13302 /*
13303 * RXE Kernel CSRs
13304 */
13305 write_csr(dd, RCV_CTRL, 0);
13306 init_rbufs(dd);
13307 /* RCV_STATUS read-only */
13308 /* RCV_CONTEXTS read-only */
13309 /* RCV_ARRAY_CNT read-only */
13310 /* RCV_BUF_SIZE read-only */
13311 write_csr(dd, RCV_BTH_QP, 0);
13312 write_csr(dd, RCV_MULTICAST, 0);
13313 write_csr(dd, RCV_BYPASS, 0);
13314 write_csr(dd, RCV_VL15, 0);
13315 /* this is a clear-down */
13316 write_csr(dd, RCV_ERR_INFO,
13317 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13318 /* RCV_ERR_STATUS read-only */
13319 write_csr(dd, RCV_ERR_MASK, 0);
13320 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13321 /* RCV_ERR_FORCE leave alone */
13322 for (i = 0; i < 32; i++)
13323 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13324 for (i = 0; i < 4; i++)
13325 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13326 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13327 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13328 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13329 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13330 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13331 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13332 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13333 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13334 }
13335 for (i = 0; i < 32; i++)
13336 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13337
13338 /*
13339 * RXE Kernel and User Per-Context CSRs
13340 */
13341 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13342 /* kernel */
13343 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13344 /* RCV_CTXT_STATUS read-only */
13345 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13346 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13347 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13348 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13349 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13350 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13351 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13352 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13353 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13354 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13355
13356 /* user */
13357 /* RCV_HDR_TAIL read-only */
13358 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13359 /* RCV_EGR_INDEX_TAIL read-only */
13360 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13361 /* RCV_EGR_OFFSET_TAIL read-only */
13362 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13363 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
13364 0);
13365 }
13366 }
13367}
13368
13369/*
13370 * Set sc2vl tables.
13371 *
13372 * They power on to zeros, so to avoid send context errors
13373 * they need to be set:
13374 *
13375 * SC 0-7 -> VL 0-7 (respectively)
13376 * SC 15 -> VL 15
13377 * otherwise
13378 * -> VL 0
13379 */
13380static void init_sc2vl_tables(struct hfi1_devdata *dd)
13381{
13382 int i;
13383 /* init per architecture spec, constrained by hardware capability */
13384
13385 /* HFI maps sent packets */
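	/* each SC2VL_VAL() call takes the table number, then (SC, VL) pairs */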
13386 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13387 0,
13388 0, 0, 1, 1,
13389 2, 2, 3, 3,
13390 4, 4, 5, 5,
13391 6, 6, 7, 7));
13392 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13393 1,
13394 8, 0, 9, 0,
13395 10, 0, 11, 0,
13396 12, 0, 13, 0,
13397 14, 0, 15, 15));
13398 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13399 2,
13400 16, 0, 17, 0,
13401 18, 0, 19, 0,
13402 20, 0, 21, 0,
13403 22, 0, 23, 0));
13404 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13405 3,
13406 24, 0, 25, 0,
13407 26, 0, 27, 0,
13408 28, 0, 29, 0,
13409 30, 0, 31, 0));
13410
13411 /* DC maps received packets */
13412 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13413 15_0,
13414 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13415 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13416 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13417 31_16,
13418 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13419 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13420
13421 /* initialize the cached sc2vl values consistently with h/w */
13422 for (i = 0; i < 32; i++) {
13423 if (i < 8 || i == 15)
13424 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13425 else
13426 *((u8 *)(dd->sc2vl) + i) = 0;
13427 }
13428}
13429
13430/*
13431 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13432 * depend on the chip going through a power-on reset - a driver may be loaded
13433 * and unloaded many times.
13434 *
13435 * Do not write any CSR values to the chip in this routine - there may be
13436 * a reset following the (possible) FLR in this routine.
13437 *
13438 */
13439static void init_chip(struct hfi1_devdata *dd)
13440{
13441 int i;
13442
13443 /*
13444 * Put the HFI CSRs in a known state.
13445 * Combine this with a DC reset.
13446 *
13447 * Stop the device from doing anything while we do a
13448 * reset. We know there are no other active users of
13449 * the device since we are now in charge. Turn off
13450	 * all outbound and inbound traffic and make sure
13451 * the device does not generate any interrupts.
13452 */
13453
13454 /* disable send contexts and SDMA engines */
13455 write_csr(dd, SEND_CTRL, 0);
13456 for (i = 0; i < dd->chip_send_contexts; i++)
13457 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13458 for (i = 0; i < dd->chip_sdma_engines; i++)
13459 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13460 /* disable port (turn off RXE inbound traffic) and contexts */
13461 write_csr(dd, RCV_CTRL, 0);
13462 for (i = 0; i < dd->chip_rcv_contexts; i++)
13463 write_csr(dd, RCV_CTXT_CTRL, 0);
13464 /* mask all interrupt sources */
13465 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13466 write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
13467
13468 /*
13469 * DC Reset: do a full DC reset before the register clear.
13470 * A recommended length of time to hold is one CSR read,
13471 * so reread the CceDcCtrl. Then, hold the DC in reset
13472 * across the clear.
13473 */
13474 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13475 (void) read_csr(dd, CCE_DC_CTRL);
13476
13477 if (use_flr) {
13478 /*
13479 * A FLR will reset the SPC core and part of the PCIe.
13480 * The parts that need to be restored have already been
13481 * saved.
13482 */
13483 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13484
13485 /* do the FLR, the DC reset will remain */
13486 hfi1_pcie_flr(dd);
13487
13488 /* restore command and BARs */
13489 restore_pci_variables(dd);
13490
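		/* A-step silicon repeats the FLR and PCI variable restore */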
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013491 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013492 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13493 hfi1_pcie_flr(dd);
13494 restore_pci_variables(dd);
13495 }
13496
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013497 reset_asic_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013498 } else {
13499 dd_dev_info(dd, "Resetting CSRs with writes\n");
13500 reset_cce_csrs(dd);
13501 reset_txe_csrs(dd);
13502 reset_rxe_csrs(dd);
13503 reset_asic_csrs(dd);
13504 reset_misc_csrs(dd);
13505 }
13506 /* clear the DC reset */
13507 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013508
Mike Marciniszyn77241052015-07-30 15:17:43 -040013509 /* Set the LED off */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013510 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013511 setextled(dd, 0);
13512 /*
13513 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013514 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013515 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013516	 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013517 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013518 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013519 * I2CCLK and I2CDAT will change per direction, and INT_N and
13520 * MODPRS_N are input only and their value is ignored.
13521 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013522 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13523 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013524}
13525
13526static void init_early_variables(struct hfi1_devdata *dd)
13527{
13528 int i;
13529
13530 /* assign link credit variables */
13531 dd->vau = CM_VAU;
13532 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013533 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013534 dd->link_credits--;
13535 dd->vcu = cu_to_vcu(hfi1_cu);
13536 /* enough room for 8 MAD packets plus header - 17K */
13537 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13538 if (dd->vl15_init > dd->link_credits)
13539 dd->vl15_init = dd->link_credits;
13540
13541 write_uninitialized_csrs_and_memories(dd);
13542
13543 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13544 for (i = 0; i < dd->num_pports; i++) {
13545 struct hfi1_pportdata *ppd = &dd->pport[i];
13546
13547 set_partition_keys(ppd);
13548 }
13549 init_sc2vl_tables(dd);
13550}
13551
13552static void init_kdeth_qp(struct hfi1_devdata *dd)
13553{
13554 /* user changed the KDETH_QP */
13555 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13556 /* out of range or illegal value */
13557 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13558 kdeth_qp = 0;
13559 }
13560 if (kdeth_qp == 0) /* not set, or failed range check */
13561 kdeth_qp = DEFAULT_KDETH_QP;
13562
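	/* program the same KDETH QP prefix into both the send and receive sides */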
13563 write_csr(dd, SEND_BTH_QP,
13564 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
13565 << SEND_BTH_QP_KDETH_QP_SHIFT);
13566
13567 write_csr(dd, RCV_BTH_QP,
13568 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
13569 << RCV_BTH_QP_KDETH_QP_SHIFT);
13570}
13571
13572/**
13573 * init_qpmap_table
13574 * @dd - device data
13575 * @first_ctxt - first context
13576	 * @last_ctxt - last context
13577 *
13578	 * This routine sets the qpn mapping table that
13579 * is indexed by qpn[8:1].
13580 *
13581 * The routine will round robin the 256 settings
13582 * from first_ctxt to last_ctxt.
13583 *
13584 * The first/last looks ahead to having specialized
13585 * receive contexts for mgmt and bypass. Normal
13586	 * verbs traffic will be assumed to be on a range
13587 * of receive contexts.
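 *
 * For example, with first_ctxt = 4 and last_ctxt = 6 the 256 entries
 * cycle through contexts 4, 5, 6, 4, 5, 6, ...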
13588 */
13589static void init_qpmap_table(struct hfi1_devdata *dd,
13590 u32 first_ctxt,
13591 u32 last_ctxt)
13592{
13593 u64 reg = 0;
13594 u64 regno = RCV_QP_MAP_TABLE;
13595 int i;
13596 u64 ctxt = first_ctxt;
13597
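	/* pack 8 one-byte context entries into each 64-bit map CSR */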
13598 for (i = 0; i < 256;) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013599 reg |= ctxt << (8 * (i % 8));
13600 i++;
13601 ctxt++;
13602 if (ctxt > last_ctxt)
13603 ctxt = first_ctxt;
13604 if (i % 8 == 0) {
13605 write_csr(dd, regno, reg);
13606 reg = 0;
13607 regno += 8;
13608 }
13609 }
13610 if (i % 8)
13611 write_csr(dd, regno, reg);
13612
13613 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13614 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13615}
13616
13617/**
13618 * init_qos - init RX qos
13619 * @dd - device data
13620	 * @first_ctxt - first receive context to use
13621 *
13622 * This routine initializes Rule 0 and the
13623 * RSM map table to implement qos.
13624 *
13625 * If all of the limit tests succeed,
13626 * qos is applied based on the array
13627 * interpretation of krcvqs where
13628 * entry 0 is VL0.
13629 *
13630 * The number of vl bits (n) and the number of qpn
13631 * bits (m) are computed to feed both the RSM map table
13632 * and the single rule.
13633 *
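 * For example (illustrative numbers only): num_vls = 8 gives n = 3; if
 * the largest krcvqs[] entry is 4, qpns_per_vl rounds up to 4 and m = 2,
 * so the map below is indexed by the 5-bit value (qpn << 3) ^ vl.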
13634 */
13635static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13636{
13637 u8 max_by_vl = 0;
13638 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13639 u64 *rsmmap;
13640 u64 reg;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013641 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013642
13643 /* validate */
13644 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13645 num_vls == 1 ||
13646 krcvqsset <= 1)
13647 goto bail;
13648 for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13649 if (krcvqs[i] > max_by_vl)
13650 max_by_vl = krcvqs[i];
13651 if (max_by_vl > 32)
13652 goto bail;
13653 qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13654 /* determine bits vl */
13655 n = ilog2(num_vls);
13656 /* determine bits for qpn */
13657 m = ilog2(qpns_per_vl);
13658 if ((m + n) > 7)
13659 goto bail;
13660 if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13661 goto bail;
13662 rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013663 if (!rsmmap)
13664 goto bail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013665 memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13666 /* init the local copy of the table */
13667 for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13668 unsigned tctxt;
13669
13670 for (qpn = 0, tctxt = ctxt;
13671 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13672 unsigned idx, regoff, regidx;
13673
13674 /* generate index <= 128 */
13675 idx = (qpn << n) ^ i;
13676 regoff = (idx % 8) * 8;
13677 regidx = idx / 8;
13678 reg = rsmmap[regidx];
13679 /* replace 0xff with context number */
13680 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13681 << regoff);
13682 reg |= (u64)(tctxt++) << regoff;
13683 rsmmap[regidx] = reg;
13684 if (tctxt == ctxt + krcvqs[i])
13685 tctxt = ctxt;
13686 }
13687 ctxt += krcvqs[i];
13688 }
13689 /* flush cached copies to chip */
13690 for (i = 0; i < NUM_MAP_REGS; i++)
13691 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13692 /* add rule0 */
13693 write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13694 RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
13695 << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13696 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13697 write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13698 LRH_BTH_MATCH_OFFSET
13699 << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13700 LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13701 LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13702 ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13703 QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13704 ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13705 write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13706 LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13707 LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13708 LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13709 LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13710 /* Enable RSM */
13711 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13712 kfree(rsmmap);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013713 /* map everything else to first context */
13714 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013715 dd->qos_shift = n + 1;
13716 return;
13717bail:
13718 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013719 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013720}
13721
13722static void init_rxe(struct hfi1_devdata *dd)
13723{
13724 /* enable all receive errors */
13725 write_csr(dd, RCV_ERR_MASK, ~0ull);
13726 /* setup QPN map table - start where VL15 context leaves off */
13727 init_qos(
13728 dd,
13729 dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
13730 /*
13731 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13732 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13733 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
13734 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13735	 * Max_Payload_Size set to its minimum of 128.
13736 *
13737 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13738 * (64 bytes). Max_Payload_Size is possibly modified upward in
13739 * tune_pcie_caps() which is called after this routine.
13740 */
13741}
13742
13743static void init_other(struct hfi1_devdata *dd)
13744{
13745 /* enable all CCE errors */
13746 write_csr(dd, CCE_ERR_MASK, ~0ull);
13747 /* enable *some* Misc errors */
13748 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13749 /* enable all DC errors, except LCB */
13750 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13751 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13752}
13753
13754/*
13755	 * Fill out the given AU table using the given CU. A CU is defined in terms
13756	 * of AUs. The table is an encoding: given the index, how many AUs does that
13757 * represent?
13758 *
13759 * NOTE: Assumes that the register layout is the same for the
13760 * local and remote tables.
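 *
 * The writes below encode: index 0 -> 0 AUs, 1 -> 1 AU, 2 -> 2*CU,
 * 3 -> 4*CU, 4 -> 8*CU, 5 -> 16*CU, 6 -> 32*CU, and 7 -> 64*CU AUs.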
13761 */
13762static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13763 u32 csr0to3, u32 csr4to7)
13764{
13765 write_csr(dd, csr0to3,
13766 0ull <<
13767 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
13768 | 1ull <<
13769 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
13770 | 2ull * cu <<
13771 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
13772 | 4ull * cu <<
13773 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13774 write_csr(dd, csr4to7,
13775 8ull * cu <<
13776 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
13777 | 16ull * cu <<
13778 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
13779 | 32ull * cu <<
13780 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
13781 | 64ull * cu <<
13782 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13783
13784}
13785
13786static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13787{
13788 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13789 SEND_CM_LOCAL_AU_TABLE4_TO7);
13790}
13791
13792void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13793{
13794 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13795 SEND_CM_REMOTE_AU_TABLE4_TO7);
13796}
13797
13798static void init_txe(struct hfi1_devdata *dd)
13799{
13800 int i;
13801
13802 /* enable all PIO, SDMA, general, and Egress errors */
13803 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13804 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13805 write_csr(dd, SEND_ERR_MASK, ~0ull);
13806 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13807
13808 /* enable all per-context and per-SDMA engine errors */
13809 for (i = 0; i < dd->chip_send_contexts; i++)
13810 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13811 for (i = 0; i < dd->chip_sdma_engines; i++)
13812 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13813
13814 /* set the local CU to AU mapping */
13815 assign_local_cm_au_table(dd, dd->vcu);
13816
13817 /*
13818 * Set reasonable default for Credit Return Timer
13819 * Don't set on Simulator - causes it to choke.
13820 */
13821 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13822 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13823}
13824
13825int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13826{
13827 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13828 unsigned sctxt;
13829 int ret = 0;
13830 u64 reg;
13831
13832 if (!rcd || !rcd->sc) {
13833 ret = -EINVAL;
13834 goto done;
13835 }
13836 sctxt = rcd->sc->hw_context;
13837 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13838 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13839 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13840 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13841 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13842 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13843 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13844 /*
13845 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040013846 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013847 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013848 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13849 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13850 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13851 }
13852
13853 /* Enable J_KEY check on receive context. */
13854 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13855 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13856 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13857 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13858done:
13859 return ret;
13860}
13861
13862int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13863{
13864 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13865 unsigned sctxt;
13866 int ret = 0;
13867 u64 reg;
13868
13869 if (!rcd || !rcd->sc) {
13870 ret = -EINVAL;
13871 goto done;
13872 }
13873 sctxt = rcd->sc->hw_context;
13874 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13875 /*
13876 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13877 * This check would not have been enabled for A0 h/w, see
13878 * set_ctxt_jkey().
13879 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013880 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013881 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13882 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13883 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13884 }
13885 /* Turn off the J_KEY on the receive side */
13886 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13887done:
13888 return ret;
13889}
13890
13891int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13892{
13893 struct hfi1_ctxtdata *rcd;
13894 unsigned sctxt;
13895 int ret = 0;
13896 u64 reg;
13897
13898 if (ctxt < dd->num_rcv_contexts)
13899 rcd = dd->rcd[ctxt];
13900 else {
13901 ret = -EINVAL;
13902 goto done;
13903 }
13904 if (!rcd || !rcd->sc) {
13905 ret = -EINVAL;
13906 goto done;
13907 }
13908 sctxt = rcd->sc->hw_context;
13909 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13910 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13911 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13912 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13913 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13914 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13915done:
13916 return ret;
13917}
13918
13919int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13920{
13921 struct hfi1_ctxtdata *rcd;
13922 unsigned sctxt;
13923 int ret = 0;
13924 u64 reg;
13925
13926 if (ctxt < dd->num_rcv_contexts)
13927 rcd = dd->rcd[ctxt];
13928 else {
13929 ret = -EINVAL;
13930 goto done;
13931 }
13932 if (!rcd || !rcd->sc) {
13933 ret = -EINVAL;
13934 goto done;
13935 }
13936 sctxt = rcd->sc->hw_context;
13937 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13938 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13939 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13940 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13941done:
13942 return ret;
13943}
13944
13945/*
13946	 * Start doing the clean up of the chip. Our clean up happens in multiple
13947 * stages and this is just the first.
13948 */
13949void hfi1_start_cleanup(struct hfi1_devdata *dd)
13950{
13951 free_cntrs(dd);
13952 free_rcverr(dd);
13953 clean_up_interrupts(dd);
13954}
13955
13956#define HFI_BASE_GUID(dev) \
13957 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
13958
13959/*
13960 * Certain chip functions need to be initialized only once per asic
13961 * instead of per-device. This function finds the peer device and
13962 * checks whether that chip initialization needs to be done by this
13963 * device.
13964 */
13965static void asic_should_init(struct hfi1_devdata *dd)
13966{
13967 unsigned long flags;
13968 struct hfi1_devdata *tmp, *peer = NULL;
13969
13970 spin_lock_irqsave(&hfi1_devs_lock, flags);
13971 /* Find our peer device */
13972 list_for_each_entry(tmp, &hfi1_dev_list, list) {
13973 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13974 dd->unit != tmp->unit) {
13975 peer = tmp;
13976 break;
13977 }
13978 }
13979
13980 /*
13981 * "Claim" the ASIC for initialization if it hasn't been
13982	 * "claimed" yet.
13983 */
13984 if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13985 dd->flags |= HFI1_DO_INIT_ASIC;
13986 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13987}
13988
Dean Luick5d9157a2015-11-16 21:59:34 -050013989/*
13990 * Set dd->boardname. Use a generic name if a name is not returned from
13991 * EFI variable space.
13992 *
13993 * Return 0 on success, -ENOMEM if space could not be allocated.
13994 */
13995static int obtain_boardname(struct hfi1_devdata *dd)
13996{
13997 /* generic board description */
13998 const char generic[] =
13999 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14000 unsigned long size;
14001 int ret;
14002
14003 ret = read_hfi1_efi_var(dd, "description", &size,
14004 (void **)&dd->boardname);
14005 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014006 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014007 /* use generic description */
14008 dd->boardname = kstrdup(generic, GFP_KERNEL);
14009 if (!dd->boardname)
14010 return -ENOMEM;
14011 }
14012 return 0;
14013}
14014
Mike Marciniszyn77241052015-07-30 15:17:43 -040014015/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014016 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014017 * @dev: the pci_dev for hfi1_ib device
14018 * @ent: pci_device_id struct for this dev
14019 *
14020 * Also allocates, initializes, and returns the devdata struct for this
14021 * device instance
14022 *
14023 * This is global, and is called directly at init to set up the
14024 * chip-specific function pointers for later use.
14025 */
14026struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14027 const struct pci_device_id *ent)
14028{
14029 struct hfi1_devdata *dd;
14030 struct hfi1_pportdata *ppd;
14031 u64 reg;
14032 int i, ret;
14033 static const char * const inames[] = { /* implementation names */
14034 "RTL silicon",
14035 "RTL VCS simulation",
14036 "RTL FPGA emulation",
14037 "Functional simulator"
14038 };
14039
14040 dd = hfi1_alloc_devdata(pdev,
14041 NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
14042 if (IS_ERR(dd))
14043 goto bail;
14044 ppd = dd->pport;
14045 for (i = 0; i < dd->num_pports; i++, ppd++) {
14046 int vl;
14047 /* init common fields */
14048 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14049 /* DC supports 4 link widths */
14050 ppd->link_width_supported =
14051 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14052 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14053 ppd->link_width_downgrade_supported =
14054 ppd->link_width_supported;
14055 /* start out enabling only 4X */
14056 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14057 ppd->link_width_downgrade_enabled =
14058 ppd->link_width_downgrade_supported;
14059 /* link width active is 0 when link is down */
14060 /* link width downgrade active is 0 when link is down */
14061
14062 if (num_vls < HFI1_MIN_VLS_SUPPORTED
14063 || num_vls > HFI1_MAX_VLS_SUPPORTED) {
14064 hfi1_early_err(&pdev->dev,
14065 "Invalid num_vls %u, using %u VLs\n",
14066 num_vls, HFI1_MAX_VLS_SUPPORTED);
14067 num_vls = HFI1_MAX_VLS_SUPPORTED;
14068 }
14069 ppd->vls_supported = num_vls;
14070 ppd->vls_operational = ppd->vls_supported;
14071 /* Set the default MTU. */
14072 for (vl = 0; vl < num_vls; vl++)
14073 dd->vld[vl].mtu = hfi1_max_mtu;
14074 dd->vld[15].mtu = MAX_MAD_PACKET;
14075 /*
14076 * Set the initial values to reasonable default, will be set
14077 * for real when link is up.
14078 */
14079 ppd->lstate = IB_PORT_DOWN;
14080 ppd->overrun_threshold = 0x4;
14081 ppd->phy_error_threshold = 0xf;
14082 ppd->port_crc_mode_enabled = link_crc_mask;
14083 /* initialize supported LTP CRC mode */
14084 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14085 /* initialize enabled LTP CRC mode */
14086 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14087 /* start in offline */
14088 ppd->host_link_state = HLS_DN_OFFLINE;
14089 init_vl_arb_caches(ppd);
14090 }
14091
14092 dd->link_default = HLS_DN_POLL;
14093
14094 /*
14095 * Do remaining PCIe setup and save PCIe values in dd.
14096 * Any error printing is already done by the init code.
14097 * On return, we have the chip mapped.
14098 */
14099 ret = hfi1_pcie_ddinit(dd, pdev, ent);
14100 if (ret < 0)
14101 goto bail_free;
14102
14103 /* verify that reads actually work, save revision for reset check */
14104 dd->revision = read_csr(dd, CCE_REVISION);
14105 if (dd->revision == ~(u64)0) {
14106 dd_dev_err(dd, "cannot read chip CSRs\n");
14107 ret = -EINVAL;
14108 goto bail_cleanup;
14109 }
14110 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14111 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14112 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14113 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14114
14115 /* obtain the hardware ID - NOT related to unit, which is a
14116 software enumeration */
14117 reg = read_csr(dd, CCE_REVISION2);
14118 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14119 & CCE_REVISION2_HFI_ID_MASK;
14120 /* the variable size will remove unwanted bits */
14121 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14122 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14123 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14124 dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
14125 (int)dd->irev);
14126
14127 /* speeds the hardware can support */
14128 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14129 /* speeds allowed to run at */
14130 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14131 /* give a reasonable active value, will be set on link up */
14132 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14133
14134 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14135 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14136 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14137 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14138 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14139 /* fix up link widths for emulation _p */
14140 ppd = dd->pport;
14141 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14142 ppd->link_width_supported =
14143 ppd->link_width_enabled =
14144 ppd->link_width_downgrade_supported =
14145 ppd->link_width_downgrade_enabled =
14146 OPA_LINK_WIDTH_1X;
14147 }
14148	/* ensure num_vls isn't larger than the number of sdma engines */
14149 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14150 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014151 num_vls, dd->chip_sdma_engines);
14152 num_vls = dd->chip_sdma_engines;
14153 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014154 }
14155
14156 /*
14157 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14158 * Limit the max if larger than the field holds. If timeout is
14159 * non-zero, then the calculated field will be at least 1.
14160 *
14161 * Must be after icode is set up - the cclock rate depends
14162 * on knowing the hardware being used.
14163 */
14164 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14165 if (dd->rcv_intr_timeout_csr >
14166 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14167 dd->rcv_intr_timeout_csr =
14168 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14169 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14170 dd->rcv_intr_timeout_csr = 1;
14171
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014172 /* needs to be done before we look for the peer device */
14173 read_guid(dd);
14174
14175 /* should this device init the ASIC block? */
14176 asic_should_init(dd);
14177
Mike Marciniszyn77241052015-07-30 15:17:43 -040014178 /* obtain chip sizes, reset chip CSRs */
14179 init_chip(dd);
14180
14181 /* read in the PCIe link speed information */
14182 ret = pcie_speeds(dd);
14183 if (ret)
14184 goto bail_cleanup;
14185
Mike Marciniszyn77241052015-07-30 15:17:43 -040014186 /* read in firmware */
14187 ret = hfi1_firmware_init(dd);
14188 if (ret)
14189 goto bail_cleanup;
14190
14191 /*
14192 * In general, the PCIe Gen3 transition must occur after the
14193 * chip has been idled (so it won't initiate any PCIe transactions
14194 * e.g. an interrupt) and before the driver changes any registers
14195 * (the transition will reset the registers).
14196 *
14197 * In particular, place this call after:
14198 * - init_chip() - the chip will not initiate any PCIe transactions
14199 * - pcie_speeds() - reads the current link speed
14200 * - hfi1_firmware_init() - the needed firmware is ready to be
14201 * downloaded
14202 */
14203 ret = do_pcie_gen3_transition(dd);
14204 if (ret)
14205 goto bail_cleanup;
14206
14207 /* start setting dd values and adjusting CSRs */
14208 init_early_variables(dd);
14209
14210 parse_platform_config(dd);
14211
Dean Luick5d9157a2015-11-16 21:59:34 -050014212 ret = obtain_boardname(dd);
14213 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014214 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014215
14216 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014217 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014218 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014219 (u32)dd->majrev,
14220 (u32)dd->minrev,
14221 (dd->revision >> CCE_REVISION_SW_SHIFT)
14222 & CCE_REVISION_SW_MASK);
14223
14224 ret = set_up_context_variables(dd);
14225 if (ret)
14226 goto bail_cleanup;
14227
14228 /* set initial RXE CSRs */
14229 init_rxe(dd);
14230 /* set initial TXE CSRs */
14231 init_txe(dd);
14232 /* set initial non-RXE, non-TXE CSRs */
14233 init_other(dd);
14234 /* set up KDETH QP prefix in both RX and TX CSRs */
14235 init_kdeth_qp(dd);
14236
14237 /* send contexts must be set up before receive contexts */
14238 ret = init_send_contexts(dd);
14239 if (ret)
14240 goto bail_cleanup;
14241
14242 ret = hfi1_create_ctxts(dd);
14243 if (ret)
14244 goto bail_cleanup;
14245
14246 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14247 /*
14248 * rcd[0] is guaranteed to be valid by this point. Also, all
14249	 * contexts are using the same value, as per the module parameter.
14250 */
14251 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14252
14253 ret = init_pervl_scs(dd);
14254 if (ret)
14255 goto bail_cleanup;
14256
14257 /* sdma init */
14258 for (i = 0; i < dd->num_pports; ++i) {
14259 ret = sdma_init(dd, i);
14260 if (ret)
14261 goto bail_cleanup;
14262 }
14263
14264 /* use contexts created by hfi1_create_ctxts */
14265 ret = set_up_interrupts(dd);
14266 if (ret)
14267 goto bail_cleanup;
14268
14269 /* set up LCB access - must be after set_up_interrupts() */
14270 init_lcb_access(dd);
14271
14272 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14273 dd->base_guid & 0xFFFFFF);
14274
14275 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14276 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14277 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14278
14279 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14280 if (ret)
14281 goto bail_clear_intr;
14282 check_fabric_firmware_versions(dd);
14283
14284 thermal_init(dd);
14285
14286 ret = init_cntrs(dd);
14287 if (ret)
14288 goto bail_clear_intr;
14289
14290 ret = init_rcverr(dd);
14291 if (ret)
14292 goto bail_free_cntrs;
14293
14294 ret = eprom_init(dd);
14295 if (ret)
14296 goto bail_free_rcverr;
14297
14298 goto bail;
14299
14300bail_free_rcverr:
14301 free_rcverr(dd);
14302bail_free_cntrs:
14303 free_cntrs(dd);
14304bail_clear_intr:
14305 clean_up_interrupts(dd);
14306bail_cleanup:
14307 hfi1_pcie_ddcleanup(dd);
14308bail_free:
14309 hfi1_free_devdata(dd);
14310 dd = ERR_PTR(ret);
14311bail:
14312 return dd;
14313}
14314
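/*
 * Return the extra egress cycles needed to pace a dw_len dword packet
 * down from current_egress_rate to desired_egress_rate.
 */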
14315static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14316 u32 dw_len)
14317{
14318 u32 delta_cycles;
14319 u32 current_egress_rate = ppd->current_egress_rate;
14320 /* rates here are in units of 10^6 bits/sec */
14321
14322 if (desired_egress_rate == -1)
14323 return 0; /* shouldn't happen */
14324
14325 if (desired_egress_rate >= current_egress_rate)
14326 return 0; /* we can't help go faster, only slower */
14327
14328 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14329 egress_cycles(dw_len * 4, current_egress_rate);
14330
14331 return (u16)delta_cycles;
14332}
14333
14334
14335/**
14336 * create_pbc - build a pbc for transmission
14337 * @flags: special case flags or-ed in built pbc
14338 * @srate: static rate
14339 * @vl: vl
14340 * @dwlen: dword length (header words + data words + pbc words)
14341 *
14342 * Create a PBC with the given flags, rate, VL, and length.
14343 *
14344 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14345 * for verbs, which does not use this PSM feature. The lone other caller
14346 * is for the diagnostic interface which calls this if the user does not
14347 * supply their own PBC.
14348 */
14349u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14350 u32 dw_len)
14351{
14352 u64 pbc, delay = 0;
14353
14354 if (unlikely(srate_mbs))
14355 delay = delay_cycles(ppd, srate_mbs, dw_len);
14356
14357 pbc = flags
14358 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14359 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14360 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14361 | (dw_len & PBC_LENGTH_DWS_MASK)
14362 << PBC_LENGTH_DWS_SHIFT;
14363
14364 return pbc;
14365}
14366
14367#define SBUS_THERMAL 0x4f
14368#define SBUS_THERM_MONITOR_MODE 0x1
14369
14370#define THERM_FAILURE(dev, ret, reason) \
14371 dd_dev_err((dd), \
14372 "Thermal sensor initialization failed: %s (%d)\n", \
14373 (reason), (ret))
14374
14375/*
14376 * Initialize the Avago Thermal sensor.
14377 *
14378 * After initialization, enable polling of thermal sensor through
14379 * SBus interface. In order for this to work, the SBus Master
14380	 * firmware has to be loaded because the HW polling
14381 * logic uses SBus interrupts, which are not supported with
14382 * default firmware. Otherwise, no data will be returned through
14383 * the ASIC_STS_THERM CSR.
14384 */
14385static int thermal_init(struct hfi1_devdata *dd)
14386{
14387 int ret = 0;
14388
14389 if (dd->icode != ICODE_RTL_SILICON ||
14390 !(dd->flags & HFI1_DO_INIT_ASIC))
14391 return ret;
14392
14393 acquire_hw_mutex(dd);
14394 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014395 /* Disable polling of thermal readings */
14396 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14397 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014398 /* Thermal Sensor Initialization */
14399 /* Step 1: Reset the Thermal SBus Receiver */
14400 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14401 RESET_SBUS_RECEIVER, 0);
14402 if (ret) {
14403 THERM_FAILURE(dd, ret, "Bus Reset");
14404 goto done;
14405 }
14406 /* Step 2: Set Reset bit in Thermal block */
14407 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14408 WRITE_SBUS_RECEIVER, 0x1);
14409 if (ret) {
14410 THERM_FAILURE(dd, ret, "Therm Block Reset");
14411 goto done;
14412 }
14413 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
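	/* 0x32 = 50: 100 MHz / 50 = 2 MHz */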
14414 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14415 WRITE_SBUS_RECEIVER, 0x32);
14416 if (ret) {
14417 THERM_FAILURE(dd, ret, "Write Clock Div");
14418 goto done;
14419 }
14420 /* Step 4: Select temperature mode */
14421 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14422 WRITE_SBUS_RECEIVER,
14423 SBUS_THERM_MONITOR_MODE);
14424 if (ret) {
14425 THERM_FAILURE(dd, ret, "Write Mode Sel");
14426 goto done;
14427 }
14428 /* Step 5: De-assert block reset and start conversion */
14429 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14430 WRITE_SBUS_RECEIVER, 0x2);
14431 if (ret) {
14432 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14433 goto done;
14434 }
14435 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14436 msleep(22);
14437
14438 /* Enable polling of thermal readings */
14439 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14440done:
14441 release_hw_mutex(dd);
14442 return ret;
14443}
14444
14445static void handle_temp_err(struct hfi1_devdata *dd)
14446{
14447 struct hfi1_pportdata *ppd = &dd->pport[0];
14448 /*
14449 * Thermal Critical Interrupt
14450 * Put the device into forced freeze mode, take link down to
14451 * offline, and put DC into reset.
14452 */
14453 dd_dev_emerg(dd,
14454 "Critical temperature reached! Forcing device into freeze mode!\n");
14455 dd->flags |= HFI1_FORCED_FREEZE;
14456 start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
14457 /*
14458 * Shut DC down as much and as quickly as possible.
14459 *
14460 * Step 1: Take the link down to OFFLINE. This will cause the
14461 * 8051 to put the Serdes in reset. However, we don't want to
14462 * go through the entire link state machine since we want to
14463 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14464 * but rather an attempt to save the chip.
14465 * Code below is almost the same as quiet_serdes() but avoids
14466 * all the extra work and the sleeps.
14467 */
14468 ppd->driver_link_ready = 0;
14469 ppd->link_enabled = 0;
14470 set_physical_link_state(dd, PLS_OFFLINE |
14471 (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
14472 /*
14473 * Step 2: Shutdown LCB and 8051
14474 * After shutdown, do not restore DC_CFG_RESET value.
14475 */
14476 dc_shutdown(dd);
14477}