/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
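/*
 * For illustration, the CCE table entry below
 *	FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to
 *	{ CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 },
 * i.e. a struct flag_table initializer whose "extra" field is unused.
 */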

/* Send Error Consequences */
#define SEC_WRITE_DROPPED 0x1
#define SEC_PACKET_DROPPED 0x2
#define SEC_SC_HALTED 0x4 /* per-context only */
#define SEC_SPC_FREEZE 0x8 /* per-HFI only */

#define DEFAULT_KRCVQS 2
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
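/*
 * Example: an irev value of 0x203 would decode as emulation revision 2
 * (0x203 >> 8) running on the parallel emulator (low nibble == 3).
 */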

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
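/*
 * Illustration only (hypothetical mapping, not one of the tables used
 * later): SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 * builds a SendSC2VLT0 value mapping SC0-SC7 to VLs 0, 0, 1, 1, 2, 2, 3, 3
 * by shifting each VL number into the corresponding SC field.
 */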

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
			| CCE_STATUS_RXE_FROZE_SMASK \
			| CCE_STATUS_TXE_FROZE_SMASK \
			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
			| CCE_STATUS_TXE_PAUSED_SMASK \
			| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC SMA message", 0x0002),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
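/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") below expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * while DC_EE1/DC_EE2 accommodate the DC block's _FLG/_FLG_CLR/_FLG_EN
 * and _FLG/_CLR/_EN register naming.
 */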

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}
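/*
 * CNTR_ELEM lays its arguments down in the field order of struct cntr_entry
 * above: name, csr, offset, flags, rw_cntr accessor. The helpers below only
 * differ in how they compute the csr address and which accessor they plug in.
 */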

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
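/*
 * Per-context receive header overflow counters are spaced 0x100 bytes apart,
 * so, for example, OVR_ELM(3) reads the CSR at RCV_HDR_OVFL_CNT + 0x300.
 */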

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
1267
1268u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1269{
1270	if (dd->flags & HFI1_PRESENT) {
1271		return readq((void __iomem *)dd->kregbase + offset);
1272	}
1273 return -1;
1274}
1275
1276void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1277{
1278 if (dd->flags & HFI1_PRESENT)
1279 writeq(value, (void __iomem *)dd->kregbase + offset);
1280}
1281
1282void __iomem *get_csr_addr(
1283 struct hfi1_devdata *dd,
1284 u32 offset)
1285{
1286 return (void __iomem *)dd->kregbase + offset;
1287}
1288
1289static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1290 int mode, u64 value)
1291{
1292 u64 ret;
1293
1294	if (mode == CNTR_MODE_R) {
1295 ret = read_csr(dd, csr);
1296 } else if (mode == CNTR_MODE_W) {
1297 write_csr(dd, csr, value);
1298 ret = value;
1299 } else {
1300 dd_dev_err(dd, "Invalid cntr register access mode");
1301 return 0;
1302 }
1303
1304 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1305 return ret;
1306}
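/*
 * A sketch of how the accessors below use this helper (values illustrative):
 * a read returns the current CSR contents, a write stores the supplied value
 * (typically zero, to clear a counter) and echoes it back.
 *
 *	u64 v = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *	(void)read_write_csr(dd, csr, CNTR_MODE_W, 0);
 */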
1307
1308/* Dev Access */
1309static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1310			      void *context, int vl, int mode, u64 data)
1311{
1312	struct hfi1_devdata *dd = context;
1313	u64 csr = entry->csr;
1314
1315	if (entry->flags & CNTR_SDMA) {
1316 if (vl == CNTR_INVALID_VL)
1317 return 0;
1318 csr += 0x100 * vl;
1319 } else {
1320 if (vl != CNTR_INVALID_VL)
1321 return 0;
1322 }
1323 return read_write_csr(dd, csr, mode, data);
1324}
1325
1326static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1327 void *context, int idx, int mode, u64 data)
1328{
1329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1330
1331 if (dd->per_sdma && idx < dd->num_sdma)
1332 return dd->per_sdma[idx].err_cnt;
1333 return 0;
1334}
1335
1336static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1337 void *context, int idx, int mode, u64 data)
1338{
1339 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1340
1341 if (dd->per_sdma && idx < dd->num_sdma)
1342 return dd->per_sdma[idx].sdma_int_cnt;
1343 return 0;
1344}
1345
1346static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1347 void *context, int idx, int mode, u64 data)
1348{
1349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1350
1351 if (dd->per_sdma && idx < dd->num_sdma)
1352 return dd->per_sdma[idx].idle_int_cnt;
1353 return 0;
1354}
1355
1356static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1357 void *context, int idx, int mode,
1358 u64 data)
1359{
1360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1361
1362 if (dd->per_sdma && idx < dd->num_sdma)
1363 return dd->per_sdma[idx].progress_int_cnt;
1364 return 0;
1365}
1366
1367static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1368			      int vl, int mode, u64 data)
1369{
1370	struct hfi1_devdata *dd = context;
1371
1372 u64 val = 0;
1373 u64 csr = entry->csr;
1374
1375 if (entry->flags & CNTR_VL) {
1376 if (vl == CNTR_INVALID_VL)
1377 return 0;
1378 csr += 8 * vl;
1379 } else {
1380 if (vl != CNTR_INVALID_VL)
1381 return 0;
1382 }
1383
1384 val = read_write_csr(dd, csr, mode, data);
1385 return val;
1386}
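/*
 * Note the two per-instance strides used above: CNTR_SDMA counters are laid
 * out 0x100 bytes apart per SDMA engine (dev_access_u32_csr), while CNTR_VL
 * counters are packed 8 bytes apart per virtual lane. Illustratively, for a
 * CNTR_VL entry whose base CSR is C, the VL 3 value is read from C + 8 * 3.
 */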
1387
1388static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1389			      int vl, int mode, u64 data)
1390{
1391	struct hfi1_devdata *dd = context;
1392	u32 csr = entry->csr;
1393 int ret = 0;
1394
1395 if (vl != CNTR_INVALID_VL)
1396 return 0;
1397 if (mode == CNTR_MODE_R)
1398 ret = read_lcb_csr(dd, csr, &data);
1399 else if (mode == CNTR_MODE_W)
1400 ret = write_lcb_csr(dd, csr, data);
1401
1402 if (ret) {
1403 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1404 return 0;
1405 }
1406
1407 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1408 return data;
1409}
1410
1411/* Port Access */
1412static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1413			       int vl, int mode, u64 data)
1414{
1415	struct hfi1_pportdata *ppd = context;
1416
1417 if (vl != CNTR_INVALID_VL)
1418 return 0;
1419 return read_write_csr(ppd->dd, entry->csr, mode, data);
1420}
1421
1422static u64 port_access_u64_csr(const struct cntr_entry *entry,
1423			       void *context, int vl, int mode, u64 data)
1424{
1425	struct hfi1_pportdata *ppd = context;
1426	u64 val;
1427 u64 csr = entry->csr;
1428
1429 if (entry->flags & CNTR_VL) {
1430 if (vl == CNTR_INVALID_VL)
1431 return 0;
1432 csr += 8 * vl;
1433 } else {
1434 if (vl != CNTR_INVALID_VL)
1435 return 0;
1436 }
1437 val = read_write_csr(ppd->dd, csr, mode, data);
1438 return val;
1439}
1440
1441/* Software defined */
1442static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1443 u64 data)
1444{
1445 u64 ret;
1446
1447 if (mode == CNTR_MODE_R) {
1448 ret = *cntr;
1449 } else if (mode == CNTR_MODE_W) {
1450 *cntr = data;
1451 ret = data;
1452 } else {
1453 dd_dev_err(dd, "Invalid cntr sw access mode");
1454 return 0;
1455 }
1456
1457 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1458
1459 return ret;
1460}
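/*
 * The software-defined accessors below simply point this helper at a u64
 * field in the ppd or dd. A minimal sketch (the "example_events" field is
 * hypothetical; the real accessors use the fields named in each function):
 *
 *	static u64 access_sw_example(const struct cntr_entry *entry,
 *				     void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *		return read_write_sw(ppd->dd, &ppd->example_events, mode, data);
 *	}
 */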
1461
1462static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1463				 int vl, int mode, u64 data)
1464{
1465	struct hfi1_pportdata *ppd = context;
1466
1467 if (vl != CNTR_INVALID_VL)
1468 return 0;
1469 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1470}
1471
1472static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1473				 int vl, int mode, u64 data)
1474{
1475	struct hfi1_pportdata *ppd = context;
1476
1477 if (vl != CNTR_INVALID_VL)
1478 return 0;
1479 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1480}
1481
1482static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1483 void *context, int vl, int mode,
1484 u64 data)
1485{
1486 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1487
1488 if (vl != CNTR_INVALID_VL)
1489 return 0;
1490 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1491}
1492
1493static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1494				    void *context, int vl, int mode, u64 data)
1495{
1496	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1497 u64 zero = 0;
1498 u64 *counter;
1499
1500	if (vl == CNTR_INVALID_VL)
1501 counter = &ppd->port_xmit_discards;
1502 else if (vl >= 0 && vl < C_VL_COUNT)
1503 counter = &ppd->port_xmit_discards_vl[vl];
1504 else
1505 counter = &zero;
1506
1507	return read_write_sw(ppd->dd, counter, mode, data);
1508}
1509
1510static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1511				       void *context, int vl, int mode,
1512 u64 data)
1513{
1514	struct hfi1_pportdata *ppd = context;
1515
1516 if (vl != CNTR_INVALID_VL)
1517 return 0;
1518
1519 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1520 mode, data);
1521}
1522
1523static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1524				      void *context, int vl, int mode, u64 data)
1525{
1526	struct hfi1_pportdata *ppd = context;
1527
1528 if (vl != CNTR_INVALID_VL)
1529 return 0;
1530
1531 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1532 mode, data);
1533}
1534
1535u64 get_all_cpu_total(u64 __percpu *cntr)
1536{
1537 int cpu;
1538 u64 counter = 0;
1539
1540 for_each_possible_cpu(cpu)
1541 counter += *per_cpu_ptr(cntr, cpu);
1542 return counter;
1543}
1544
1545static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1546 u64 __percpu *cntr,
1547 int vl, int mode, u64 data)
1548{
1549	u64 ret = 0;
1550
1551 if (vl != CNTR_INVALID_VL)
1552 return 0;
1553
1554 if (mode == CNTR_MODE_R) {
1555 ret = get_all_cpu_total(cntr) - *z_val;
1556 } else if (mode == CNTR_MODE_W) {
1557 /* A write can only zero the counter */
1558 if (data == 0)
1559 *z_val = get_all_cpu_total(cntr);
1560 else
1561 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1562 } else {
1563 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1564 return 0;
1565 }
1566
1567 return ret;
1568}
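/*
 * Per-CPU counters are never cleared directly: a "write of zero" only
 * records the current total in the matching z_* baseline, and a read reports
 * the delta from that baseline. Illustrative use with the interrupt counter
 * handled by the accessor below:
 *
 *	read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *		       CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *	u64 total = read_write_cpu(dd, &dd->z_int_counter, dd->int_counter,
 *				   CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */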
1569
1570static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1571 void *context, int vl, int mode, u64 data)
1572{
1573	struct hfi1_devdata *dd = context;
1574
1575 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1576 mode, data);
1577}
1578
1579static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1580				   void *context, int vl, int mode, u64 data)
1581{
1582	struct hfi1_devdata *dd = context;
1583
1584 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1585 mode, data);
1586}
1587
1588static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1589 void *context, int vl, int mode, u64 data)
1590{
1591	struct hfi1_devdata *dd = context;
1592
1593 return dd->verbs_dev.n_piowait;
1594}
1595
1596static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1597 void *context, int vl, int mode, u64 data)
1598{
1599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1600
1601 return dd->verbs_dev.n_piodrain;
1602}
1603
1604static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1605 void *context, int vl, int mode, u64 data)
1606{
1607	struct hfi1_devdata *dd = context;
1608
1609 return dd->verbs_dev.n_txwait;
1610}
1611
1612static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1613 void *context, int vl, int mode, u64 data)
1614{
1615	struct hfi1_devdata *dd = context;
1616
1617 return dd->verbs_dev.n_kmem_wait;
1618}
1619
1620static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1621				   void *context, int vl, int mode, u64 data)
1622{
1623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1624
1625	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1626 mode, data);
1627}
1628
1629/* Software counters for the error status bits within MISC_ERR_STATUS */
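/*
 * These accessors only report dd->misc_err_status_cnt[]; the array is
 * presumably filled in by the MISC_ERR_STATUS interrupt path, roughly along
 * the lines of the sketch below (not the actual handler; "reg" holds the
 * just-read MISC_ERR_STATUS value and the bit-count macro name is
 * hypothetical):
 *
 *	for (i = 0; i < NUM_MISC_ERR_STATUS_BITS; i++)
 *		if (reg & (1ull << i))
 *			dd->misc_err_status_cnt[i]++;
 */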
1630static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1631 void *context, int vl, int mode,
1632 u64 data)
1633{
1634 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635
1636 return dd->misc_err_status_cnt[12];
1637}
1638
1639static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1640 void *context, int vl, int mode,
1641 u64 data)
1642{
1643 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644
1645 return dd->misc_err_status_cnt[11];
1646}
1647
1648static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1649 void *context, int vl, int mode,
1650 u64 data)
1651{
1652 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653
1654 return dd->misc_err_status_cnt[10];
1655}
1656
1657static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1658 void *context, int vl,
1659 int mode, u64 data)
1660{
1661 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662
1663 return dd->misc_err_status_cnt[9];
1664}
1665
1666static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1667 void *context, int vl, int mode,
1668 u64 data)
1669{
1670 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671
1672 return dd->misc_err_status_cnt[8];
1673}
1674
1675static u64 access_misc_efuse_read_bad_addr_err_cnt(
1676 const struct cntr_entry *entry,
1677 void *context, int vl, int mode, u64 data)
1678{
1679 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681 return dd->misc_err_status_cnt[7];
1682}
1683
1684static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1685 void *context, int vl,
1686 int mode, u64 data)
1687{
1688 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689
1690 return dd->misc_err_status_cnt[6];
1691}
1692
1693static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1694 void *context, int vl, int mode,
1695 u64 data)
1696{
1697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698
1699 return dd->misc_err_status_cnt[5];
1700}
1701
1702static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1703 void *context, int vl, int mode,
1704 u64 data)
1705{
1706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708 return dd->misc_err_status_cnt[4];
1709}
1710
1711static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1712 void *context, int vl,
1713 int mode, u64 data)
1714{
1715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717 return dd->misc_err_status_cnt[3];
1718}
1719
1720static u64 access_misc_csr_write_bad_addr_err_cnt(
1721 const struct cntr_entry *entry,
1722 void *context, int vl, int mode, u64 data)
1723{
1724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1725
1726 return dd->misc_err_status_cnt[2];
1727}
1728
1729static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1730 void *context, int vl,
1731 int mode, u64 data)
1732{
1733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1734
1735 return dd->misc_err_status_cnt[1];
1736}
1737
1738static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1739 void *context, int vl, int mode,
1740 u64 data)
1741{
1742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1743
1744 return dd->misc_err_status_cnt[0];
1745}
1746
1747/*
1748 * Software counter for the aggregate of
1749 * individual CceErrStatus counters
1750 */
1751static u64 access_sw_cce_err_status_aggregated_cnt(
1752 const struct cntr_entry *entry,
1753 void *context, int vl, int mode, u64 data)
1754{
1755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756
1757 return dd->sw_cce_err_status_aggregate;
1758}
1759
1760/*
1761 * Software counters corresponding to each of the
1762 * error status bits within CceErrStatus
1763 */
1764static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1765 void *context, int vl, int mode,
1766 u64 data)
1767{
1768 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769
1770 return dd->cce_err_status_cnt[40];
1771}
1772
1773static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1774 void *context, int vl, int mode,
1775 u64 data)
1776{
1777 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778
1779 return dd->cce_err_status_cnt[39];
1780}
1781
1782static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1783 void *context, int vl, int mode,
1784 u64 data)
1785{
1786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788 return dd->cce_err_status_cnt[38];
1789}
1790
1791static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1792 void *context, int vl, int mode,
1793 u64 data)
1794{
1795 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796
1797 return dd->cce_err_status_cnt[37];
1798}
1799
1800static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1801 void *context, int vl, int mode,
1802 u64 data)
1803{
1804 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805
1806 return dd->cce_err_status_cnt[36];
1807}
1808
1809static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1810 const struct cntr_entry *entry,
1811 void *context, int vl, int mode, u64 data)
1812{
1813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814
1815 return dd->cce_err_status_cnt[35];
1816}
1817
1818static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1819 const struct cntr_entry *entry,
1820 void *context, int vl, int mode, u64 data)
1821{
1822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1823
1824 return dd->cce_err_status_cnt[34];
1825}
1826
1827static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1828 void *context, int vl,
1829 int mode, u64 data)
1830{
1831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1832
1833 return dd->cce_err_status_cnt[33];
1834}
1835
1836static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1837 void *context, int vl, int mode,
1838 u64 data)
1839{
1840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1841
1842 return dd->cce_err_status_cnt[32];
1843}
1844
1845static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1846 void *context, int vl, int mode, u64 data)
1847{
1848 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849
1850 return dd->cce_err_status_cnt[31];
1851}
1852
1853static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1854 void *context, int vl, int mode,
1855 u64 data)
1856{
1857 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858
1859 return dd->cce_err_status_cnt[30];
1860}
1861
1862static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1863 void *context, int vl, int mode,
1864 u64 data)
1865{
1866 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867
1868 return dd->cce_err_status_cnt[29];
1869}
1870
1871static u64 access_pcic_transmit_back_parity_err_cnt(
1872 const struct cntr_entry *entry,
1873 void *context, int vl, int mode, u64 data)
1874{
1875 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876
1877 return dd->cce_err_status_cnt[28];
1878}
1879
1880static u64 access_pcic_transmit_front_parity_err_cnt(
1881 const struct cntr_entry *entry,
1882 void *context, int vl, int mode, u64 data)
1883{
1884 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885
1886 return dd->cce_err_status_cnt[27];
1887}
1888
1889static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1890 void *context, int vl, int mode,
1891 u64 data)
1892{
1893 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894
1895 return dd->cce_err_status_cnt[26];
1896}
1897
1898static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1899 void *context, int vl, int mode,
1900 u64 data)
1901{
1902 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903
1904 return dd->cce_err_status_cnt[25];
1905}
1906
1907static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1908 void *context, int vl, int mode,
1909 u64 data)
1910{
1911 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912
1913 return dd->cce_err_status_cnt[24];
1914}
1915
1916static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1917 void *context, int vl, int mode,
1918 u64 data)
1919{
1920 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921
1922 return dd->cce_err_status_cnt[23];
1923}
1924
1925static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1926 void *context, int vl,
1927 int mode, u64 data)
1928{
1929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930
1931 return dd->cce_err_status_cnt[22];
1932}
1933
1934static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1935 void *context, int vl, int mode,
1936 u64 data)
1937{
1938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939
1940 return dd->cce_err_status_cnt[21];
1941}
1942
1943static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1944 const struct cntr_entry *entry,
1945 void *context, int vl, int mode, u64 data)
1946{
1947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948
1949 return dd->cce_err_status_cnt[20];
1950}
1951
1952static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1953 void *context, int vl,
1954 int mode, u64 data)
1955{
1956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957
1958 return dd->cce_err_status_cnt[19];
1959}
1960
1961static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1962 void *context, int vl, int mode,
1963 u64 data)
1964{
1965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966
1967 return dd->cce_err_status_cnt[18];
1968}
1969
1970static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1971 void *context, int vl, int mode,
1972 u64 data)
1973{
1974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975
1976 return dd->cce_err_status_cnt[17];
1977}
1978
1979static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1980 void *context, int vl, int mode,
1981 u64 data)
1982{
1983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984
1985 return dd->cce_err_status_cnt[16];
1986}
1987
1988static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1989 void *context, int vl, int mode,
1990 u64 data)
1991{
1992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993
1994 return dd->cce_err_status_cnt[15];
1995}
1996
1997static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1998 void *context, int vl,
1999 int mode, u64 data)
2000{
2001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002
2003 return dd->cce_err_status_cnt[14];
2004}
2005
2006static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2007 void *context, int vl, int mode,
2008 u64 data)
2009{
2010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011
2012 return dd->cce_err_status_cnt[13];
2013}
2014
2015static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2016 const struct cntr_entry *entry,
2017 void *context, int vl, int mode, u64 data)
2018{
2019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020
2021 return dd->cce_err_status_cnt[12];
2022}
2023
2024static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2025 const struct cntr_entry *entry,
2026 void *context, int vl, int mode, u64 data)
2027{
2028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029
2030 return dd->cce_err_status_cnt[11];
2031}
2032
2033static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2034 const struct cntr_entry *entry,
2035 void *context, int vl, int mode, u64 data)
2036{
2037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038
2039 return dd->cce_err_status_cnt[10];
2040}
2041
2042static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2043 const struct cntr_entry *entry,
2044 void *context, int vl, int mode, u64 data)
2045{
2046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047
2048 return dd->cce_err_status_cnt[9];
2049}
2050
2051static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2052 const struct cntr_entry *entry,
2053 void *context, int vl, int mode, u64 data)
2054{
2055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056
2057 return dd->cce_err_status_cnt[8];
2058}
2059
2060static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2061 void *context, int vl,
2062 int mode, u64 data)
2063{
2064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066 return dd->cce_err_status_cnt[7];
2067}
2068
2069static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2070 const struct cntr_entry *entry,
2071 void *context, int vl, int mode, u64 data)
2072{
2073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075 return dd->cce_err_status_cnt[6];
2076}
2077
2078static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2079 void *context, int vl, int mode,
2080 u64 data)
2081{
2082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084 return dd->cce_err_status_cnt[5];
2085}
2086
2087static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2088 void *context, int vl, int mode,
2089 u64 data)
2090{
2091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093 return dd->cce_err_status_cnt[4];
2094}
2095
2096static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2097 const struct cntr_entry *entry,
2098 void *context, int vl, int mode, u64 data)
2099{
2100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102 return dd->cce_err_status_cnt[3];
2103}
2104
2105static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2106 void *context, int vl,
2107 int mode, u64 data)
2108{
2109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2110
2111 return dd->cce_err_status_cnt[2];
2112}
2113
2114static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2115 void *context, int vl,
2116 int mode, u64 data)
2117{
2118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2119
2120 return dd->cce_err_status_cnt[1];
2121}
2122
2123static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2124 void *context, int vl, int mode,
2125 u64 data)
2126{
2127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2128
2129 return dd->cce_err_status_cnt[0];
2130}
2131
2132/*
2133 * Software counters corresponding to each of the
2134 * error status bits within RcvErrStatus
2135 */
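/*
 * As with the MISC and CCE groups above, each accessor here just reports one
 * slot of dd->rcv_err_status_cnt[]. In the counter tables such accessors are
 * hooked up through CNTR_ELEM with no backing CSR, e.g. (illustrative entry):
 *
 *	CNTR_ELEM("RxCsrParityErr", 0, 0, CNTR_NORMAL,
 *		  access_rx_csr_parity_err_cnt)
 */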
2136static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2137 void *context, int vl, int mode,
2138 u64 data)
2139{
2140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142 return dd->rcv_err_status_cnt[63];
2143}
2144
2145static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2146 void *context, int vl,
2147 int mode, u64 data)
2148{
2149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151 return dd->rcv_err_status_cnt[62];
2152}
2153
2154static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2155 void *context, int vl, int mode,
2156 u64 data)
2157{
2158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160 return dd->rcv_err_status_cnt[61];
2161}
2162
2163static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2164 void *context, int vl, int mode,
2165 u64 data)
2166{
2167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169 return dd->rcv_err_status_cnt[60];
2170}
2171
2172static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2173 void *context, int vl,
2174 int mode, u64 data)
2175{
2176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178 return dd->rcv_err_status_cnt[59];
2179}
2180
2181static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2182 void *context, int vl,
2183 int mode, u64 data)
2184{
2185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187 return dd->rcv_err_status_cnt[58];
2188}
2189
2190static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2191 void *context, int vl, int mode,
2192 u64 data)
2193{
2194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196 return dd->rcv_err_status_cnt[57];
2197}
2198
2199static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2200 void *context, int vl, int mode,
2201 u64 data)
2202{
2203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205 return dd->rcv_err_status_cnt[56];
2206}
2207
2208static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2209 void *context, int vl, int mode,
2210 u64 data)
2211{
2212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214 return dd->rcv_err_status_cnt[55];
2215}
2216
2217static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2218 const struct cntr_entry *entry,
2219 void *context, int vl, int mode, u64 data)
2220{
2221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223 return dd->rcv_err_status_cnt[54];
2224}
2225
2226static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2227 const struct cntr_entry *entry,
2228 void *context, int vl, int mode, u64 data)
2229{
2230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231
2232 return dd->rcv_err_status_cnt[53];
2233}
2234
2235static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2236 void *context, int vl,
2237 int mode, u64 data)
2238{
2239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240
2241 return dd->rcv_err_status_cnt[52];
2242}
2243
2244static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2245 void *context, int vl,
2246 int mode, u64 data)
2247{
2248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249
2250 return dd->rcv_err_status_cnt[51];
2251}
2252
2253static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2254 void *context, int vl,
2255 int mode, u64 data)
2256{
2257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258
2259 return dd->rcv_err_status_cnt[50];
2260}
2261
2262static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2263 void *context, int vl,
2264 int mode, u64 data)
2265{
2266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267
2268 return dd->rcv_err_status_cnt[49];
2269}
2270
2271static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2272 void *context, int vl,
2273 int mode, u64 data)
2274{
2275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276
2277 return dd->rcv_err_status_cnt[48];
2278}
2279
2280static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2281 void *context, int vl,
2282 int mode, u64 data)
2283{
2284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285
2286 return dd->rcv_err_status_cnt[47];
2287}
2288
2289static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2290 void *context, int vl, int mode,
2291 u64 data)
2292{
2293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294
2295 return dd->rcv_err_status_cnt[46];
2296}
2297
2298static u64 access_rx_hq_intr_csr_parity_err_cnt(
2299 const struct cntr_entry *entry,
2300 void *context, int vl, int mode, u64 data)
2301{
2302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303
2304 return dd->rcv_err_status_cnt[45];
2305}
2306
2307static u64 access_rx_lookup_csr_parity_err_cnt(
2308 const struct cntr_entry *entry,
2309 void *context, int vl, int mode, u64 data)
2310{
2311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312
2313 return dd->rcv_err_status_cnt[44];
2314}
2315
2316static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2317 const struct cntr_entry *entry,
2318 void *context, int vl, int mode, u64 data)
2319{
2320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321
2322 return dd->rcv_err_status_cnt[43];
2323}
2324
2325static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2326 const struct cntr_entry *entry,
2327 void *context, int vl, int mode, u64 data)
2328{
2329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330
2331 return dd->rcv_err_status_cnt[42];
2332}
2333
2334static u64 access_rx_lookup_des_part2_parity_err_cnt(
2335 const struct cntr_entry *entry,
2336 void *context, int vl, int mode, u64 data)
2337{
2338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339
2340 return dd->rcv_err_status_cnt[41];
2341}
2342
2343static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2344 const struct cntr_entry *entry,
2345 void *context, int vl, int mode, u64 data)
2346{
2347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348
2349 return dd->rcv_err_status_cnt[40];
2350}
2351
2352static u64 access_rx_lookup_des_part1_unc_err_cnt(
2353 const struct cntr_entry *entry,
2354 void *context, int vl, int mode, u64 data)
2355{
2356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357
2358 return dd->rcv_err_status_cnt[39];
2359}
2360
2361static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2362 const struct cntr_entry *entry,
2363 void *context, int vl, int mode, u64 data)
2364{
2365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366
2367 return dd->rcv_err_status_cnt[38];
2368}
2369
2370static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2371 const struct cntr_entry *entry,
2372 void *context, int vl, int mode, u64 data)
2373{
2374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375
2376 return dd->rcv_err_status_cnt[37];
2377}
2378
2379static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2380 const struct cntr_entry *entry,
2381 void *context, int vl, int mode, u64 data)
2382{
2383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384
2385 return dd->rcv_err_status_cnt[36];
2386}
2387
2388static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2389 const struct cntr_entry *entry,
2390 void *context, int vl, int mode, u64 data)
2391{
2392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393
2394 return dd->rcv_err_status_cnt[35];
2395}
2396
2397static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2398 const struct cntr_entry *entry,
2399 void *context, int vl, int mode, u64 data)
2400{
2401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402
2403 return dd->rcv_err_status_cnt[34];
2404}
2405
2406static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2407 const struct cntr_entry *entry,
2408 void *context, int vl, int mode, u64 data)
2409{
2410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411
2412 return dd->rcv_err_status_cnt[33];
2413}
2414
2415static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2416 void *context, int vl, int mode,
2417 u64 data)
2418{
2419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420
2421 return dd->rcv_err_status_cnt[32];
2422}
2423
2424static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2425 void *context, int vl, int mode,
2426 u64 data)
2427{
2428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429
2430 return dd->rcv_err_status_cnt[31];
2431}
2432
2433static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2434 void *context, int vl, int mode,
2435 u64 data)
2436{
2437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438
2439 return dd->rcv_err_status_cnt[30];
2440}
2441
2442static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2443 void *context, int vl, int mode,
2444 u64 data)
2445{
2446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447
2448 return dd->rcv_err_status_cnt[29];
2449}
2450
2451static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2452 void *context, int vl,
2453 int mode, u64 data)
2454{
2455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456
2457 return dd->rcv_err_status_cnt[28];
2458}
2459
2460static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2461 const struct cntr_entry *entry,
2462 void *context, int vl, int mode, u64 data)
2463{
2464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465
2466 return dd->rcv_err_status_cnt[27];
2467}
2468
2469static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2470 const struct cntr_entry *entry,
2471 void *context, int vl, int mode, u64 data)
2472{
2473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474
2475 return dd->rcv_err_status_cnt[26];
2476}
2477
2478static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2479 const struct cntr_entry *entry,
2480 void *context, int vl, int mode, u64 data)
2481{
2482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483
2484 return dd->rcv_err_status_cnt[25];
2485}
2486
2487static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2488 const struct cntr_entry *entry,
2489 void *context, int vl, int mode, u64 data)
2490{
2491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492
2493 return dd->rcv_err_status_cnt[24];
2494}
2495
2496static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2497 const struct cntr_entry *entry,
2498 void *context, int vl, int mode, u64 data)
2499{
2500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501
2502 return dd->rcv_err_status_cnt[23];
2503}
2504
2505static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2506 const struct cntr_entry *entry,
2507 void *context, int vl, int mode, u64 data)
2508{
2509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510
2511 return dd->rcv_err_status_cnt[22];
2512}
2513
2514static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2515 const struct cntr_entry *entry,
2516 void *context, int vl, int mode, u64 data)
2517{
2518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519
2520 return dd->rcv_err_status_cnt[21];
2521}
2522
2523static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2524 const struct cntr_entry *entry,
2525 void *context, int vl, int mode, u64 data)
2526{
2527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528
2529 return dd->rcv_err_status_cnt[20];
2530}
2531
2532static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2533 const struct cntr_entry *entry,
2534 void *context, int vl, int mode, u64 data)
2535{
2536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537
2538 return dd->rcv_err_status_cnt[19];
2539}
2540
2541static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2542 void *context, int vl,
2543 int mode, u64 data)
2544{
2545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546
2547 return dd->rcv_err_status_cnt[18];
2548}
2549
2550static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2551 void *context, int vl,
2552 int mode, u64 data)
2553{
2554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555
2556 return dd->rcv_err_status_cnt[17];
2557}
2558
2559static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2560 const struct cntr_entry *entry,
2561 void *context, int vl, int mode, u64 data)
2562{
2563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564
2565 return dd->rcv_err_status_cnt[16];
2566}
2567
2568static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2569 const struct cntr_entry *entry,
2570 void *context, int vl, int mode, u64 data)
2571{
2572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573
2574 return dd->rcv_err_status_cnt[15];
2575}
2576
2577static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2578 void *context, int vl,
2579 int mode, u64 data)
2580{
2581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582
2583 return dd->rcv_err_status_cnt[14];
2584}
2585
2586static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2587 void *context, int vl,
2588 int mode, u64 data)
2589{
2590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591
2592 return dd->rcv_err_status_cnt[13];
2593}
2594
2595static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2596 void *context, int vl, int mode,
2597 u64 data)
2598{
2599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600
2601 return dd->rcv_err_status_cnt[12];
2602}
2603
2604static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2605 void *context, int vl, int mode,
2606 u64 data)
2607{
2608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609
2610 return dd->rcv_err_status_cnt[11];
2611}
2612
2613static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2614 void *context, int vl, int mode,
2615 u64 data)
2616{
2617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618
2619 return dd->rcv_err_status_cnt[10];
2620}
2621
2622static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2623 void *context, int vl, int mode,
2624 u64 data)
2625{
2626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627
2628 return dd->rcv_err_status_cnt[9];
2629}
2630
2631static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2632 void *context, int vl, int mode,
2633 u64 data)
2634{
2635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636
2637 return dd->rcv_err_status_cnt[8];
2638}
2639
2640static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2641 const struct cntr_entry *entry,
2642 void *context, int vl, int mode, u64 data)
2643{
2644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646 return dd->rcv_err_status_cnt[7];
2647}
2648
2649static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2650 const struct cntr_entry *entry,
2651 void *context, int vl, int mode, u64 data)
2652{
2653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655 return dd->rcv_err_status_cnt[6];
2656}
2657
2658static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2659 void *context, int vl, int mode,
2660 u64 data)
2661{
2662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664 return dd->rcv_err_status_cnt[5];
2665}
2666
2667static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2668 void *context, int vl, int mode,
2669 u64 data)
2670{
2671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673 return dd->rcv_err_status_cnt[4];
2674}
2675
2676static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2677 void *context, int vl, int mode,
2678 u64 data)
2679{
2680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682 return dd->rcv_err_status_cnt[3];
2683}
2684
2685static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2686 void *context, int vl, int mode,
2687 u64 data)
2688{
2689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2690
2691 return dd->rcv_err_status_cnt[2];
2692}
2693
2694static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2695 void *context, int vl, int mode,
2696 u64 data)
2697{
2698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2699
2700 return dd->rcv_err_status_cnt[1];
2701}
2702
2703static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2704 void *context, int vl, int mode,
2705 u64 data)
2706{
2707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2708
2709 return dd->rcv_err_status_cnt[0];
2710}
2711
2712/*
2713 * Software counters corresponding to each of the
2714 * error status bits within SendPioErrStatus
2715 */
2716static u64 access_pio_pec_sop_head_parity_err_cnt(
2717 const struct cntr_entry *entry,
2718 void *context, int vl, int mode, u64 data)
2719{
2720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722 return dd->send_pio_err_status_cnt[35];
2723}
2724
2725static u64 access_pio_pcc_sop_head_parity_err_cnt(
2726 const struct cntr_entry *entry,
2727 void *context, int vl, int mode, u64 data)
2728{
2729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731 return dd->send_pio_err_status_cnt[34];
2732}
2733
2734static u64 access_pio_last_returned_cnt_parity_err_cnt(
2735 const struct cntr_entry *entry,
2736 void *context, int vl, int mode, u64 data)
2737{
2738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740 return dd->send_pio_err_status_cnt[33];
2741}
2742
2743static u64 access_pio_current_free_cnt_parity_err_cnt(
2744 const struct cntr_entry *entry,
2745 void *context, int vl, int mode, u64 data)
2746{
2747 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749 return dd->send_pio_err_status_cnt[32];
2750}
2751
2752static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2753 void *context, int vl, int mode,
2754 u64 data)
2755{
2756 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758 return dd->send_pio_err_status_cnt[31];
2759}
2760
2761static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2762 void *context, int vl, int mode,
2763 u64 data)
2764{
2765 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767 return dd->send_pio_err_status_cnt[30];
2768}
2769
2770static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2771 void *context, int vl, int mode,
2772 u64 data)
2773{
2774 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776 return dd->send_pio_err_status_cnt[29];
2777}
2778
2779static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2780 const struct cntr_entry *entry,
2781 void *context, int vl, int mode, u64 data)
2782{
2783 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785 return dd->send_pio_err_status_cnt[28];
2786}
2787
2788static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2789 void *context, int vl, int mode,
2790 u64 data)
2791{
2792 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794 return dd->send_pio_err_status_cnt[27];
2795}
2796
2797static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2798 void *context, int vl, int mode,
2799 u64 data)
2800{
2801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803 return dd->send_pio_err_status_cnt[26];
2804}
2805
2806static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2807 void *context, int vl,
2808 int mode, u64 data)
2809{
2810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811
2812 return dd->send_pio_err_status_cnt[25];
2813}
2814
2815static u64 access_pio_block_qw_count_parity_err_cnt(
2816 const struct cntr_entry *entry,
2817 void *context, int vl, int mode, u64 data)
2818{
2819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820
2821 return dd->send_pio_err_status_cnt[24];
2822}
2823
2824static u64 access_pio_write_qw_valid_parity_err_cnt(
2825 const struct cntr_entry *entry,
2826 void *context, int vl, int mode, u64 data)
2827{
2828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829
2830 return dd->send_pio_err_status_cnt[23];
2831}
2832
2833static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2834 void *context, int vl, int mode,
2835 u64 data)
2836{
2837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838
2839 return dd->send_pio_err_status_cnt[22];
2840}
2841
2842static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2843 void *context, int vl,
2844 int mode, u64 data)
2845{
2846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847
2848 return dd->send_pio_err_status_cnt[21];
2849}
2850
2851static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2852 void *context, int vl,
2853 int mode, u64 data)
2854{
2855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856
2857 return dd->send_pio_err_status_cnt[20];
2858}
2859
2860static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2861 void *context, int vl,
2862 int mode, u64 data)
2863{
2864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865
2866 return dd->send_pio_err_status_cnt[19];
2867}
2868
2869static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2870 const struct cntr_entry *entry,
2871 void *context, int vl, int mode, u64 data)
2872{
2873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874
2875 return dd->send_pio_err_status_cnt[18];
2876}
2877
2878static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2879 void *context, int vl, int mode,
2880 u64 data)
2881{
2882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883
2884 return dd->send_pio_err_status_cnt[17];
2885}
2886
2887static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2888 void *context, int vl, int mode,
2889 u64 data)
2890{
2891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892
2893 return dd->send_pio_err_status_cnt[16];
2894}
2895
2896static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2897 const struct cntr_entry *entry,
2898 void *context, int vl, int mode, u64 data)
2899{
2900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901
2902 return dd->send_pio_err_status_cnt[15];
2903}
2904
2905static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2906 const struct cntr_entry *entry,
2907 void *context, int vl, int mode, u64 data)
2908{
2909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910
2911 return dd->send_pio_err_status_cnt[14];
2912}
2913
2914static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2915 const struct cntr_entry *entry,
2916 void *context, int vl, int mode, u64 data)
2917{
2918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919
2920 return dd->send_pio_err_status_cnt[13];
2921}
2922
2923static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2924 const struct cntr_entry *entry,
2925 void *context, int vl, int mode, u64 data)
2926{
2927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928
2929 return dd->send_pio_err_status_cnt[12];
2930}
2931
2932static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2933 const struct cntr_entry *entry,
2934 void *context, int vl, int mode, u64 data)
2935{
2936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937
2938 return dd->send_pio_err_status_cnt[11];
2939}
2940
2941static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2942 const struct cntr_entry *entry,
2943 void *context, int vl, int mode, u64 data)
2944{
2945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946
2947 return dd->send_pio_err_status_cnt[10];
2948}
2949
2950static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2951 const struct cntr_entry *entry,
2952 void *context, int vl, int mode, u64 data)
2953{
2954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955
2956 return dd->send_pio_err_status_cnt[9];
2957}
2958
2959static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2960 const struct cntr_entry *entry,
2961 void *context, int vl, int mode, u64 data)
2962{
2963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964
2965 return dd->send_pio_err_status_cnt[8];
2966}
2967
2968static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2969 const struct cntr_entry *entry,
2970 void *context, int vl, int mode, u64 data)
2971{
2972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974 return dd->send_pio_err_status_cnt[7];
2975}
2976
2977static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2978 void *context, int vl, int mode,
2979 u64 data)
2980{
2981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983 return dd->send_pio_err_status_cnt[6];
2984}
2985
2986static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2987 void *context, int vl, int mode,
2988 u64 data)
2989{
2990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992 return dd->send_pio_err_status_cnt[5];
2993}
2994
2995static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2996 void *context, int vl, int mode,
2997 u64 data)
2998{
2999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001 return dd->send_pio_err_status_cnt[4];
3002}
3003
3004static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3005 void *context, int vl, int mode,
3006 u64 data)
3007{
3008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009
3010 return dd->send_pio_err_status_cnt[3];
3011}
3012
3013static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3014 void *context, int vl, int mode,
3015 u64 data)
3016{
3017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3018
3019 return dd->send_pio_err_status_cnt[2];
3020}
3021
3022static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3023 void *context, int vl,
3024 int mode, u64 data)
3025{
3026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3027
3028 return dd->send_pio_err_status_cnt[1];
3029}
3030
3031static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3032 void *context, int vl, int mode,
3033 u64 data)
3034{
3035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3036
3037 return dd->send_pio_err_status_cnt[0];
3038}
3039
3040/*
3041 * Software counters corresponding to each of the
3042 * error status bits within SendDmaErrStatus
3043 */
3044static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3045 const struct cntr_entry *entry,
3046 void *context, int vl, int mode, u64 data)
3047{
3048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050 return dd->send_dma_err_status_cnt[3];
3051}
3052
3053static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3054 const struct cntr_entry *entry,
3055 void *context, int vl, int mode, u64 data)
3056{
3057 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059 return dd->send_dma_err_status_cnt[2];
3060}
3061
3062static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3063 void *context, int vl, int mode,
3064 u64 data)
3065{
3066 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068 return dd->send_dma_err_status_cnt[1];
3069}
3070
3071static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3072 void *context, int vl, int mode,
3073 u64 data)
3074{
3075 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3076
3077 return dd->send_dma_err_status_cnt[0];
3078}
3079
3080/*
3081 * Software counters corresponding to each of the
3082 * error status bits within SendEgressErrStatus
3083 */
3084static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3085 const struct cntr_entry *entry,
3086 void *context, int vl, int mode, u64 data)
3087{
3088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090 return dd->send_egress_err_status_cnt[63];
3091}
3092
3093static u64 access_tx_read_sdma_memory_csr_err_cnt(
3094 const struct cntr_entry *entry,
3095 void *context, int vl, int mode, u64 data)
3096{
3097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099 return dd->send_egress_err_status_cnt[62];
3100}
3101
3102static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3103 void *context, int vl, int mode,
3104 u64 data)
3105{
3106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108 return dd->send_egress_err_status_cnt[61];
3109}
3110
3111static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3112 void *context, int vl,
3113 int mode, u64 data)
3114{
3115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116
3117 return dd->send_egress_err_status_cnt[60];
3118}
3119
3120static u64 access_tx_read_sdma_memory_cor_err_cnt(
3121 const struct cntr_entry *entry,
3122 void *context, int vl, int mode, u64 data)
3123{
3124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125
3126 return dd->send_egress_err_status_cnt[59];
3127}
3128
3129static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3130 void *context, int vl, int mode,
3131 u64 data)
3132{
3133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134
3135 return dd->send_egress_err_status_cnt[58];
3136}
3137
3138static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3139 void *context, int vl, int mode,
3140 u64 data)
3141{
3142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144 return dd->send_egress_err_status_cnt[57];
3145}
3146
3147static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3148 void *context, int vl, int mode,
3149 u64 data)
3150{
3151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153 return dd->send_egress_err_status_cnt[56];
3154}
3155
3156static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3157 void *context, int vl, int mode,
3158 u64 data)
3159{
3160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162 return dd->send_egress_err_status_cnt[55];
3163}
3164
3165static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3166 void *context, int vl, int mode,
3167 u64 data)
3168{
3169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171 return dd->send_egress_err_status_cnt[54];
3172}
3173
3174static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3175 void *context, int vl, int mode,
3176 u64 data)
3177{
3178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179
3180 return dd->send_egress_err_status_cnt[53];
3181}
3182
3183static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3184 void *context, int vl, int mode,
3185 u64 data)
3186{
3187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188
3189 return dd->send_egress_err_status_cnt[52];
3190}
3191
3192static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3193 void *context, int vl, int mode,
3194 u64 data)
3195{
3196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197
3198 return dd->send_egress_err_status_cnt[51];
3199}
3200
3201static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3202 void *context, int vl, int mode,
3203 u64 data)
3204{
3205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206
3207 return dd->send_egress_err_status_cnt[50];
3208}
3209
3210static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3211 void *context, int vl, int mode,
3212 u64 data)
3213{
3214 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215
3216 return dd->send_egress_err_status_cnt[49];
3217}
3218
3219static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3220 void *context, int vl, int mode,
3221 u64 data)
3222{
3223 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224
3225 return dd->send_egress_err_status_cnt[48];
3226}
3227
3228static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl, int mode,
3230 u64 data)
3231{
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234 return dd->send_egress_err_status_cnt[47];
3235}
3236
3237static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3238 void *context, int vl, int mode,
3239 u64 data)
3240{
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243 return dd->send_egress_err_status_cnt[46];
3244}
3245
3246static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl, int mode,
3248 u64 data)
3249{
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252 return dd->send_egress_err_status_cnt[45];
3253}
3254
3255static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3256 void *context, int vl,
3257 int mode, u64 data)
3258{
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261 return dd->send_egress_err_status_cnt[44];
3262}
3263
3264static u64 access_tx_read_sdma_memory_unc_err_cnt(
3265 const struct cntr_entry *entry,
3266 void *context, int vl, int mode, u64 data)
3267{
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270 return dd->send_egress_err_status_cnt[43];
3271}
3272
3273static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3274 void *context, int vl, int mode,
3275 u64 data)
3276{
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279 return dd->send_egress_err_status_cnt[42];
3280}
3281
3282static u64 access_tx_credit_return_partiy_err_cnt(
3283 const struct cntr_entry *entry,
3284 void *context, int vl, int mode, u64 data)
3285{
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288 return dd->send_egress_err_status_cnt[41];
3289}
3290
3291static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3292 const struct cntr_entry *entry,
3293 void *context, int vl, int mode, u64 data)
3294{
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297 return dd->send_egress_err_status_cnt[40];
3298}
3299
3300static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3301 const struct cntr_entry *entry,
3302 void *context, int vl, int mode, u64 data)
3303{
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306 return dd->send_egress_err_status_cnt[39];
3307}
3308
3309static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3310 const struct cntr_entry *entry,
3311 void *context, int vl, int mode, u64 data)
3312{
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315 return dd->send_egress_err_status_cnt[38];
3316}
3317
3318static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3319 const struct cntr_entry *entry,
3320 void *context, int vl, int mode, u64 data)
3321{
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324 return dd->send_egress_err_status_cnt[37];
3325}
3326
3327static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3328 const struct cntr_entry *entry,
3329 void *context, int vl, int mode, u64 data)
3330{
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333 return dd->send_egress_err_status_cnt[36];
3334}
3335
3336static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3337 const struct cntr_entry *entry,
3338 void *context, int vl, int mode, u64 data)
3339{
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342 return dd->send_egress_err_status_cnt[35];
3343}
3344
3345static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3348{
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351 return dd->send_egress_err_status_cnt[34];
3352}
3353
3354static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3355 const struct cntr_entry *entry,
3356 void *context, int vl, int mode, u64 data)
3357{
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360 return dd->send_egress_err_status_cnt[33];
3361}
3362
3363static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3366{
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369 return dd->send_egress_err_status_cnt[32];
3370}
3371
3372static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3375{
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378 return dd->send_egress_err_status_cnt[31];
3379}
3380
3381static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3384{
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387 return dd->send_egress_err_status_cnt[30];
3388}
3389
3390static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3393{
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396 return dd->send_egress_err_status_cnt[29];
3397}
3398
3399static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3402{
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405 return dd->send_egress_err_status_cnt[28];
3406}
3407
3408static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3411{
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414 return dd->send_egress_err_status_cnt[27];
3415}
3416
3417static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3420{
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423 return dd->send_egress_err_status_cnt[26];
3424}
3425
3426static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3429{
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432 return dd->send_egress_err_status_cnt[25];
3433}
3434
3435static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3438{
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441 return dd->send_egress_err_status_cnt[24];
3442}
3443
3444static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3447{
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450 return dd->send_egress_err_status_cnt[23];
3451}
3452
3453static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3456{
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459 return dd->send_egress_err_status_cnt[22];
3460}
3461
3462static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3465{
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468 return dd->send_egress_err_status_cnt[21];
3469}
3470
3471static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3474{
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477 return dd->send_egress_err_status_cnt[20];
3478}
3479
3480static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3483{
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486 return dd->send_egress_err_status_cnt[19];
3487}
3488
3489static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3490 const struct cntr_entry *entry,
3491 void *context, int vl, int mode, u64 data)
3492{
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495 return dd->send_egress_err_status_cnt[18];
3496}
3497
3498static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3499 const struct cntr_entry *entry,
3500 void *context, int vl, int mode, u64 data)
3501{
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504 return dd->send_egress_err_status_cnt[17];
3505}
3506
3507static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3508 const struct cntr_entry *entry,
3509 void *context, int vl, int mode, u64 data)
3510{
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513 return dd->send_egress_err_status_cnt[16];
3514}
3515
3516static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3517 void *context, int vl, int mode,
3518 u64 data)
3519{
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522 return dd->send_egress_err_status_cnt[15];
3523}
3524
3525static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3526 void *context, int vl,
3527 int mode, u64 data)
3528{
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531 return dd->send_egress_err_status_cnt[14];
3532}
3533
3534static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3535 void *context, int vl, int mode,
3536 u64 data)
3537{
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540 return dd->send_egress_err_status_cnt[13];
3541}
3542
3543static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3544 void *context, int vl, int mode,
3545 u64 data)
3546{
3547 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549 return dd->send_egress_err_status_cnt[12];
3550}
3551
3552static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3553 const struct cntr_entry *entry,
3554 void *context, int vl, int mode, u64 data)
3555{
3556 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558 return dd->send_egress_err_status_cnt[11];
3559}
3560
3561static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3562 void *context, int vl, int mode,
3563 u64 data)
3564{
3565 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567 return dd->send_egress_err_status_cnt[10];
3568}
3569
3570static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3571 void *context, int vl, int mode,
3572 u64 data)
3573{
3574 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576 return dd->send_egress_err_status_cnt[9];
3577}
3578
3579static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3580 const struct cntr_entry *entry,
3581 void *context, int vl, int mode, u64 data)
3582{
3583 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585 return dd->send_egress_err_status_cnt[8];
3586}
3587
3588static u64 access_tx_pio_launch_intf_parity_err_cnt(
3589 const struct cntr_entry *entry,
3590 void *context, int vl, int mode, u64 data)
3591{
3592 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594 return dd->send_egress_err_status_cnt[7];
3595}
3596
3597static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3598 void *context, int vl, int mode,
3599 u64 data)
3600{
3601 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603 return dd->send_egress_err_status_cnt[6];
3604}
3605
3606static u64 access_tx_incorrect_link_state_err_cnt(
3607 const struct cntr_entry *entry,
3608 void *context, int vl, int mode, u64 data)
3609{
3610 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612 return dd->send_egress_err_status_cnt[5];
3613}
3614
3615static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3616 void *context, int vl, int mode,
3617 u64 data)
3618{
3619 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621 return dd->send_egress_err_status_cnt[4];
3622}
3623
3624static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3625 const struct cntr_entry *entry,
3626 void *context, int vl, int mode, u64 data)
3627{
3628 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630 return dd->send_egress_err_status_cnt[3];
3631}
3632
3633static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3634 void *context, int vl, int mode,
3635 u64 data)
3636{
3637 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3638
3639 return dd->send_egress_err_status_cnt[2];
3640}
3641
3642static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3643 const struct cntr_entry *entry,
3644 void *context, int vl, int mode, u64 data)
3645{
3646 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3647
3648 return dd->send_egress_err_status_cnt[1];
3649}
3650
3651static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3652 const struct cntr_entry *entry,
3653 void *context, int vl, int mode, u64 data)
3654{
3655 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3656
3657 return dd->send_egress_err_status_cnt[0];
3658}
3659
3660/*
3661 * Software counters corresponding to each of the
3662 * error status bits within SendErrStatus
3663 */
3664static u64 access_send_csr_write_bad_addr_err_cnt(
3665 const struct cntr_entry *entry,
3666 void *context, int vl, int mode, u64 data)
3667{
3668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670 return dd->send_err_status_cnt[2];
3671}
3672
3673static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3674 void *context, int vl,
3675 int mode, u64 data)
3676{
3677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679 return dd->send_err_status_cnt[1];
3680}
3681
3682static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3683 void *context, int vl, int mode,
3684 u64 data)
3685{
3686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3687
3688 return dd->send_err_status_cnt[0];
3689}
3690
3691/*
3692 * Software counters corresponding to each of the
3693 * error status bits within SendCtxtErrStatus
3694 */
3695static u64 access_pio_write_out_of_bounds_err_cnt(
3696 const struct cntr_entry *entry,
3697 void *context, int vl, int mode, u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->sw_ctxt_err_status_cnt[4];
3702}
3703
3704static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3705 void *context, int vl, int mode,
3706 u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->sw_ctxt_err_status_cnt[3];
3711}
3712
3713static u64 access_pio_write_crosses_boundary_err_cnt(
3714 const struct cntr_entry *entry,
3715 void *context, int vl, int mode, u64 data)
3716{
3717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719 return dd->sw_ctxt_err_status_cnt[2];
3720}
3721
3722static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3723 void *context, int vl,
3724 int mode, u64 data)
3725{
3726 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727
3728 return dd->sw_ctxt_err_status_cnt[1];
3729}
3730
3731static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3732 void *context, int vl, int mode,
3733 u64 data)
3734{
3735 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3736
3737 return dd->sw_ctxt_err_status_cnt[0];
3738}
3739
3740/*
3741 * Software counters corresponding to each of the
3742 * error status bits within SendDmaEngErrStatus
3743 */
3744static u64 access_sdma_header_request_fifo_cor_err_cnt(
3745 const struct cntr_entry *entry,
3746 void *context, int vl, int mode, u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_send_dma_eng_err_status_cnt[23];
3751}
3752
3753static u64 access_sdma_header_storage_cor_err_cnt(
3754 const struct cntr_entry *entry,
3755 void *context, int vl, int mode, u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_send_dma_eng_err_status_cnt[22];
3760}
3761
3762static u64 access_sdma_packet_tracking_cor_err_cnt(
3763 const struct cntr_entry *entry,
3764 void *context, int vl, int mode, u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_send_dma_eng_err_status_cnt[21];
3769}
3770
3771static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3772 void *context, int vl, int mode,
3773 u64 data)
3774{
3775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777 return dd->sw_send_dma_eng_err_status_cnt[20];
3778}
3779
3780static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3781 void *context, int vl, int mode,
3782 u64 data)
3783{
3784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786 return dd->sw_send_dma_eng_err_status_cnt[19];
3787}
3788
3789static u64 access_sdma_header_request_fifo_unc_err_cnt(
3790 const struct cntr_entry *entry,
3791 void *context, int vl, int mode, u64 data)
3792{
3793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795 return dd->sw_send_dma_eng_err_status_cnt[18];
3796}
3797
3798static u64 access_sdma_header_storage_unc_err_cnt(
3799 const struct cntr_entry *entry,
3800 void *context, int vl, int mode, u64 data)
3801{
3802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804 return dd->sw_send_dma_eng_err_status_cnt[17];
3805}
3806
3807static u64 access_sdma_packet_tracking_unc_err_cnt(
3808 const struct cntr_entry *entry,
3809 void *context, int vl, int mode, u64 data)
3810{
3811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813 return dd->sw_send_dma_eng_err_status_cnt[16];
3814}
3815
3816static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3817 void *context, int vl, int mode,
3818 u64 data)
3819{
3820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822 return dd->sw_send_dma_eng_err_status_cnt[15];
3823}
3824
3825static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3826 void *context, int vl, int mode,
3827 u64 data)
3828{
3829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831 return dd->sw_send_dma_eng_err_status_cnt[14];
3832}
3833
3834static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3835 void *context, int vl, int mode,
3836 u64 data)
3837{
3838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840 return dd->sw_send_dma_eng_err_status_cnt[13];
3841}
3842
3843static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3844 void *context, int vl, int mode,
3845 u64 data)
3846{
3847 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849 return dd->sw_send_dma_eng_err_status_cnt[12];
3850}
3851
3852static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3853 void *context, int vl, int mode,
3854 u64 data)
3855{
3856 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858 return dd->sw_send_dma_eng_err_status_cnt[11];
3859}
3860
3861static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3862 void *context, int vl, int mode,
3863 u64 data)
3864{
3865 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867 return dd->sw_send_dma_eng_err_status_cnt[10];
3868}
3869
3870static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3871 void *context, int vl, int mode,
3872 u64 data)
3873{
3874 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3875
3876 return dd->sw_send_dma_eng_err_status_cnt[9];
3877}
3878
3879static u64 access_sdma_packet_desc_overflow_err_cnt(
3880 const struct cntr_entry *entry,
3881 void *context, int vl, int mode, u64 data)
3882{
3883 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3884
3885 return dd->sw_send_dma_eng_err_status_cnt[8];
3886}
3887
3888static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3889 void *context, int vl,
3890 int mode, u64 data)
3891{
3892 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3893
3894 return dd->sw_send_dma_eng_err_status_cnt[7];
3895}
3896
3897static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3898 void *context, int vl, int mode, u64 data)
3899{
3900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902 return dd->sw_send_dma_eng_err_status_cnt[6];
3903}
3904
3905static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3906 void *context, int vl, int mode,
3907 u64 data)
3908{
3909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911 return dd->sw_send_dma_eng_err_status_cnt[5];
3912}
3913
3914static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3915 void *context, int vl, int mode,
3916 u64 data)
3917{
3918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920 return dd->sw_send_dma_eng_err_status_cnt[4];
3921}
3922
3923static u64 access_sdma_tail_out_of_bounds_err_cnt(
3924 const struct cntr_entry *entry,
3925 void *context, int vl, int mode, u64 data)
3926{
3927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929 return dd->sw_send_dma_eng_err_status_cnt[3];
3930}
3931
3932static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3933 void *context, int vl, int mode,
3934 u64 data)
3935{
3936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3937
3938 return dd->sw_send_dma_eng_err_status_cnt[2];
3939}
3940
3941static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3942 void *context, int vl, int mode,
3943 u64 data)
3944{
3945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3946
3947 return dd->sw_send_dma_eng_err_status_cnt[1];
3948}
3949
3950static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3951 void *context, int vl, int mode,
3952 u64 data)
3953{
3954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3955
3956 return dd->sw_send_dma_eng_err_status_cnt[0];
3957}
3958
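/*
 * Accessor for the DC receive error counter (bound to
 * DCC_ERR_PORTRCV_ERR_CNT in dev_cntrs[] below): on a read the
 * software-counted bypass packet errors are folded into the CSR
 * value, saturating at CNTR_MAX; on a write the software component
 * is zeroed as well.
 */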
3959static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3960 void *context, int vl, int mode,
3961 u64 data)
3962{
3963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3964
3965 u64 val = 0;
3966 u64 csr = entry->csr;
3967
3968 val = read_write_csr(dd, csr, mode, data);
3969 if (mode == CNTR_MODE_R) {
3970 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3971 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3972 } else if (mode == CNTR_MODE_W) {
3973 dd->sw_rcv_bypass_packet_errors = 0;
3974 } else {
3975 dd_dev_err(dd, "Invalid cntr register access mode");
3976 return 0;
3977 }
3978 return val;
3979}
3980
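/*
 * Generate accessors for the per-CPU software counters kept in
 * ppd->ibport_data.rvp; read_write_cpu() combines the per-CPU
 * counter with its z_ ##cntr companion value (the zero baseline
 * used when the counter is cleared).
 */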
3981#define def_access_sw_cpu(cntr) \
3982static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3983 void *context, int vl, int mode, u64 data) \
3984{ \
3985 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3986	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3987 ppd->ibport_data.rvp.cntr, vl, \
3988			      mode, data); \
3989}
3990
3991def_access_sw_cpu(rc_acks);
3992def_access_sw_cpu(rc_qacks);
3993def_access_sw_cpu(rc_delayed_comp);
3994
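/*
 * Generate accessors for the plain software counters in
 * ppd->ibport_data.rvp (the n_ ##cntr fields).  These are not kept
 * per VL, so any per-VL query simply returns 0.
 */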
3995#define def_access_ibp_counter(cntr) \
3996static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3997 void *context, int vl, int mode, u64 data) \
3998{ \
3999 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4000 \
4001 if (vl != CNTR_INVALID_VL) \
4002 return 0; \
4003 \
4004	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4005			     mode, data); \
4006}
4007
4008def_access_ibp_counter(loop_pkts);
4009def_access_ibp_counter(rc_resends);
4010def_access_ibp_counter(rnr_naks);
4011def_access_ibp_counter(other_naks);
4012def_access_ibp_counter(rc_timeouts);
4013def_access_ibp_counter(pkt_drops);
4014def_access_ibp_counter(dmawait);
4015def_access_ibp_counter(rc_seqnak);
4016def_access_ibp_counter(rc_dupreq);
4017def_access_ibp_counter(rdma_seq);
4018def_access_ibp_counter(unaligned);
4019def_access_ibp_counter(seq_naks);
4020
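/*
 * Device counter table, indexed by the C_* counter enum.  Each entry
 * names the counter and supplies either a CSR to read directly or an
 * access function, plus CNTR_* flags (normal, synthesized, per-VL,
 * per-SDMA-engine, 32-bit) describing how the value is obtained.
 */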
4021static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4022[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4023[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4024 CNTR_NORMAL),
4025[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4026 CNTR_NORMAL),
4027[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4028 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4029 CNTR_NORMAL),
4030[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4031 CNTR_NORMAL),
4032[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4033 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4034[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4035 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4036[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4037 CNTR_NORMAL),
4038[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4039 CNTR_NORMAL),
4040[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4041 CNTR_NORMAL),
4042[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4043 CNTR_NORMAL),
4044[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4045 CNTR_NORMAL),
4046[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4047 CNTR_NORMAL),
4048[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4049 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4050[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4051 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4052[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4053 CNTR_SYNTH),
4054[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4055 access_dc_rcv_err_cnt),
4056[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4057 CNTR_SYNTH),
4058[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4059 CNTR_SYNTH),
4060[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4061 CNTR_SYNTH),
4062[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4063 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4064[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4065 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4066 CNTR_SYNTH),
4067[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4068 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4069[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4070 CNTR_SYNTH),
4071[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4072 CNTR_SYNTH),
4073[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4074 CNTR_SYNTH),
4075[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4076 CNTR_SYNTH),
4077[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4078 CNTR_SYNTH),
4079[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4080 CNTR_SYNTH),
4081[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4082 CNTR_SYNTH),
4083[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4084 CNTR_SYNTH | CNTR_VL),
4085[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4086 CNTR_SYNTH | CNTR_VL),
4087[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4088[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4089 CNTR_SYNTH | CNTR_VL),
4090[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4091[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4092 CNTR_SYNTH | CNTR_VL),
4093[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4094 CNTR_SYNTH),
4095[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4096 CNTR_SYNTH | CNTR_VL),
4097[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4098 CNTR_SYNTH),
4099[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4100 CNTR_SYNTH | CNTR_VL),
4101[C_DC_TOTAL_CRC] =
4102 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4103 CNTR_SYNTH),
4104[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4105 CNTR_SYNTH),
4106[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4107 CNTR_SYNTH),
4108[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4109 CNTR_SYNTH),
4110[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4111 CNTR_SYNTH),
4112[C_DC_CRC_MULT_LN] =
4113 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4114 CNTR_SYNTH),
4115[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4116 CNTR_SYNTH),
4117[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4118 CNTR_SYNTH),
4119[C_DC_SEQ_CRC_CNT] =
4120 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4121 CNTR_SYNTH),
4122[C_DC_ESC0_ONLY_CNT] =
4123 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4124 CNTR_SYNTH),
4125[C_DC_ESC0_PLUS1_CNT] =
4126 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4127 CNTR_SYNTH),
4128[C_DC_ESC0_PLUS2_CNT] =
4129 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4130 CNTR_SYNTH),
4131[C_DC_REINIT_FROM_PEER_CNT] =
4132 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4133 CNTR_SYNTH),
4134[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4135 CNTR_SYNTH),
4136[C_DC_MISC_FLG_CNT] =
4137 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4138 CNTR_SYNTH),
4139[C_DC_PRF_GOOD_LTP_CNT] =
4140 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4141[C_DC_PRF_ACCEPTED_LTP_CNT] =
4142 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4143 CNTR_SYNTH),
4144[C_DC_PRF_RX_FLIT_CNT] =
4145 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4146[C_DC_PRF_TX_FLIT_CNT] =
4147 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4148[C_DC_PRF_CLK_CNTR] =
4149 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4150[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4151 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4152[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4153 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4154 CNTR_SYNTH),
4155[C_DC_PG_STS_TX_SBE_CNT] =
4156 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4157[C_DC_PG_STS_TX_MBE_CNT] =
4158 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4159 CNTR_SYNTH),
4160[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4161 access_sw_cpu_intr),
4162[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4163 access_sw_cpu_rcv_limit),
4164[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4165 access_sw_vtx_wait),
4166[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4167 access_sw_pio_wait),
4168[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4169 access_sw_pio_drain),
4170[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4171 access_sw_kmem_wait),
4172[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4173 access_sw_send_schedule),
4174[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4175 SEND_DMA_DESC_FETCHED_CNT, 0,
4176 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4177 dev_access_u32_csr),
4178[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4179 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4180 access_sde_int_cnt),
4181[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4182 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4183 access_sde_err_cnt),
4184[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4185 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4186 access_sde_idle_int_cnt),
4187[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4188 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4189 access_sde_progress_int_cnt),
4190/* MISC_ERR_STATUS */
4191[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4192 CNTR_NORMAL,
4193 access_misc_pll_lock_fail_err_cnt),
4194[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4195 CNTR_NORMAL,
4196 access_misc_mbist_fail_err_cnt),
4197[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4198 CNTR_NORMAL,
4199 access_misc_invalid_eep_cmd_err_cnt),
4200[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4201 CNTR_NORMAL,
4202 access_misc_efuse_done_parity_err_cnt),
4203[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4204 CNTR_NORMAL,
4205 access_misc_efuse_write_err_cnt),
4206[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4207 0, CNTR_NORMAL,
4208 access_misc_efuse_read_bad_addr_err_cnt),
4209[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4210 CNTR_NORMAL,
4211 access_misc_efuse_csr_parity_err_cnt),
4212[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4213 CNTR_NORMAL,
4214 access_misc_fw_auth_failed_err_cnt),
4215[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4216 CNTR_NORMAL,
4217 access_misc_key_mismatch_err_cnt),
4218[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4219 CNTR_NORMAL,
4220 access_misc_sbus_write_failed_err_cnt),
4221[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4222 CNTR_NORMAL,
4223 access_misc_csr_write_bad_addr_err_cnt),
4224[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4225 CNTR_NORMAL,
4226 access_misc_csr_read_bad_addr_err_cnt),
4227[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4228 CNTR_NORMAL,
4229 access_misc_csr_parity_err_cnt),
4230/* CceErrStatus */
4231[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4232 CNTR_NORMAL,
4233 access_sw_cce_err_status_aggregated_cnt),
4234[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4235 CNTR_NORMAL,
4236 access_cce_msix_csr_parity_err_cnt),
4237[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4238 CNTR_NORMAL,
4239 access_cce_int_map_unc_err_cnt),
4240[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4241 CNTR_NORMAL,
4242 access_cce_int_map_cor_err_cnt),
4243[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4244 CNTR_NORMAL,
4245 access_cce_msix_table_unc_err_cnt),
4246[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4247 CNTR_NORMAL,
4248 access_cce_msix_table_cor_err_cnt),
4249[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4250 0, CNTR_NORMAL,
4251 access_cce_rxdma_conv_fifo_parity_err_cnt),
4252[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4253 0, CNTR_NORMAL,
4254 access_cce_rcpl_async_fifo_parity_err_cnt),
4255[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4256 CNTR_NORMAL,
4257 access_cce_seg_write_bad_addr_err_cnt),
4258[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4259 CNTR_NORMAL,
4260 access_cce_seg_read_bad_addr_err_cnt),
4261[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4262 CNTR_NORMAL,
4263 access_la_triggered_cnt),
4264[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4265 CNTR_NORMAL,
4266 access_cce_trgt_cpl_timeout_err_cnt),
4267[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4268 CNTR_NORMAL,
4269 access_pcic_receive_parity_err_cnt),
4270[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4271 CNTR_NORMAL,
4272 access_pcic_transmit_back_parity_err_cnt),
4273[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4274 0, CNTR_NORMAL,
4275 access_pcic_transmit_front_parity_err_cnt),
4276[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4277 CNTR_NORMAL,
4278 access_pcic_cpl_dat_q_unc_err_cnt),
4279[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4280 CNTR_NORMAL,
4281 access_pcic_cpl_hd_q_unc_err_cnt),
4282[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4283 CNTR_NORMAL,
4284 access_pcic_post_dat_q_unc_err_cnt),
4285[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4286 CNTR_NORMAL,
4287 access_pcic_post_hd_q_unc_err_cnt),
4288[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4289 CNTR_NORMAL,
4290 access_pcic_retry_sot_mem_unc_err_cnt),
4291[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4292 CNTR_NORMAL,
4293 access_pcic_retry_mem_unc_err),
4294[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4295 CNTR_NORMAL,
4296 access_pcic_n_post_dat_q_parity_err_cnt),
4297[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4298 CNTR_NORMAL,
4299 access_pcic_n_post_h_q_parity_err_cnt),
4300[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4301 CNTR_NORMAL,
4302 access_pcic_cpl_dat_q_cor_err_cnt),
4303[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4304 CNTR_NORMAL,
4305 access_pcic_cpl_hd_q_cor_err_cnt),
4306[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4307 CNTR_NORMAL,
4308 access_pcic_post_dat_q_cor_err_cnt),
4309[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4310 CNTR_NORMAL,
4311 access_pcic_post_hd_q_cor_err_cnt),
4312[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4313 CNTR_NORMAL,
4314 access_pcic_retry_sot_mem_cor_err_cnt),
4315[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4316 CNTR_NORMAL,
4317 access_pcic_retry_mem_cor_err_cnt),
4318[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4319 "CceCli1AsyncFifoDbgParityError", 0, 0,
4320 CNTR_NORMAL,
4321 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4322[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4323 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4324 CNTR_NORMAL,
4325 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4326 ),
4327[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4328 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4331[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4332 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4333 CNTR_NORMAL,
4334 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4335[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4336 0, CNTR_NORMAL,
4337 access_cce_cli2_async_fifo_parity_err_cnt),
4338[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4339 CNTR_NORMAL,
4340 access_cce_csr_cfg_bus_parity_err_cnt),
4341[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4342 0, CNTR_NORMAL,
4343 access_cce_cli0_async_fifo_parity_err_cnt),
4344[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4345 CNTR_NORMAL,
4346 access_cce_rspd_data_parity_err_cnt),
4347[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4348 CNTR_NORMAL,
4349 access_cce_trgt_access_err_cnt),
4350[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4351 0, CNTR_NORMAL,
4352 access_cce_trgt_async_fifo_parity_err_cnt),
4353[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4354 CNTR_NORMAL,
4355 access_cce_csr_write_bad_addr_err_cnt),
4356[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4357 CNTR_NORMAL,
4358 access_cce_csr_read_bad_addr_err_cnt),
4359[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_ccs_csr_parity_err_cnt),
4362
4363/* RcvErrStatus */
4364[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4365 CNTR_NORMAL,
4366 access_rx_csr_parity_err_cnt),
4367[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4368 CNTR_NORMAL,
4369 access_rx_csr_write_bad_addr_err_cnt),
4370[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4371 CNTR_NORMAL,
4372 access_rx_csr_read_bad_addr_err_cnt),
4373[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4374 CNTR_NORMAL,
4375 access_rx_dma_csr_unc_err_cnt),
4376[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4377 CNTR_NORMAL,
4378 access_rx_dma_dq_fsm_encoding_err_cnt),
4379[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4380 CNTR_NORMAL,
4381 access_rx_dma_eq_fsm_encoding_err_cnt),
4382[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4383 CNTR_NORMAL,
4384 access_rx_dma_csr_parity_err_cnt),
4385[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4386 CNTR_NORMAL,
4387 access_rx_rbuf_data_cor_err_cnt),
4388[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4389 CNTR_NORMAL,
4390 access_rx_rbuf_data_unc_err_cnt),
4391[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4392 CNTR_NORMAL,
4393 access_rx_dma_data_fifo_rd_cor_err_cnt),
4394[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4395 CNTR_NORMAL,
4396 access_rx_dma_data_fifo_rd_unc_err_cnt),
4397[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4398 CNTR_NORMAL,
4399 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4400[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4401 CNTR_NORMAL,
4402 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4403[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4404 CNTR_NORMAL,
4405 access_rx_rbuf_desc_part2_cor_err_cnt),
4406[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4407 CNTR_NORMAL,
4408 access_rx_rbuf_desc_part2_unc_err_cnt),
4409[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4410 CNTR_NORMAL,
4411 access_rx_rbuf_desc_part1_cor_err_cnt),
4412[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4413 CNTR_NORMAL,
4414 access_rx_rbuf_desc_part1_unc_err_cnt),
4415[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4416 CNTR_NORMAL,
4417 access_rx_hq_intr_fsm_err_cnt),
4418[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4419 CNTR_NORMAL,
4420 access_rx_hq_intr_csr_parity_err_cnt),
4421[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4422 CNTR_NORMAL,
4423 access_rx_lookup_csr_parity_err_cnt),
4424[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4425 CNTR_NORMAL,
4426 access_rx_lookup_rcv_array_cor_err_cnt),
4427[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4428 CNTR_NORMAL,
4429 access_rx_lookup_rcv_array_unc_err_cnt),
4430[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4431 0, CNTR_NORMAL,
4432 access_rx_lookup_des_part2_parity_err_cnt),
4433[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4434 0, CNTR_NORMAL,
4435 access_rx_lookup_des_part1_unc_cor_err_cnt),
4436[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4437 CNTR_NORMAL,
4438 access_rx_lookup_des_part1_unc_err_cnt),
4439[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4440 CNTR_NORMAL,
4441 access_rx_rbuf_next_free_buf_cor_err_cnt),
4442[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4443 CNTR_NORMAL,
4444 access_rx_rbuf_next_free_buf_unc_err_cnt),
4445[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4446 "RxRbufFlInitWrAddrParityErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4449[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4450 0, CNTR_NORMAL,
4451 access_rx_rbuf_fl_initdone_parity_err_cnt),
4452[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4453 0, CNTR_NORMAL,
4454 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4455[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4458[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rx_rbuf_empty_err_cnt),
4461[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4462 CNTR_NORMAL,
4463 access_rx_rbuf_full_err_cnt),
4464[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4465 CNTR_NORMAL,
4466 access_rbuf_bad_lookup_err_cnt),
4467[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rbuf_ctx_id_parity_err_cnt),
4470[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rbuf_csr_qeopdw_parity_err_cnt),
4473[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4474 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4475 CNTR_NORMAL,
4476 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4477[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4478 "RxRbufCsrQTlPtrParityErr", 0, 0,
4479 CNTR_NORMAL,
4480 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4481[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4482 0, CNTR_NORMAL,
4483 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4484[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4485 0, CNTR_NORMAL,
4486 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4487[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4488 0, 0, CNTR_NORMAL,
4489 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4490[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4491 0, CNTR_NORMAL,
4492 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4493[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4494 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4495 CNTR_NORMAL,
4496 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4497[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4498 0, CNTR_NORMAL,
4499 access_rx_rbuf_block_list_read_cor_err_cnt),
4500[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4501 0, CNTR_NORMAL,
4502 access_rx_rbuf_block_list_read_unc_err_cnt),
4503[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4504 CNTR_NORMAL,
4505 access_rx_rbuf_lookup_des_cor_err_cnt),
4506[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4507 CNTR_NORMAL,
4508 access_rx_rbuf_lookup_des_unc_err_cnt),
4509[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4510 "RxRbufLookupDesRegUncCorErr", 0, 0,
4511 CNTR_NORMAL,
4512 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4513[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4514 CNTR_NORMAL,
4515 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4516[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4517 CNTR_NORMAL,
4518 access_rx_rbuf_free_list_cor_err_cnt),
4519[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4520 CNTR_NORMAL,
4521 access_rx_rbuf_free_list_unc_err_cnt),
4522[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4523 CNTR_NORMAL,
4524 access_rx_rcv_fsm_encoding_err_cnt),
4525[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_dma_flag_cor_err_cnt),
4528[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4529 CNTR_NORMAL,
4530 access_rx_dma_flag_unc_err_cnt),
4531[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4532 CNTR_NORMAL,
4533 access_rx_dc_sop_eop_parity_err_cnt),
4534[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_rx_rcv_csr_parity_err_cnt),
4537[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_rx_rcv_qp_map_table_cor_err_cnt),
4540[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4541 CNTR_NORMAL,
4542 access_rx_rcv_qp_map_table_unc_err_cnt),
4543[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4544 CNTR_NORMAL,
4545 access_rx_rcv_data_cor_err_cnt),
4546[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4547 CNTR_NORMAL,
4548 access_rx_rcv_data_unc_err_cnt),
4549[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4550 CNTR_NORMAL,
4551 access_rx_rcv_hdr_cor_err_cnt),
4552[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4553 CNTR_NORMAL,
4554 access_rx_rcv_hdr_unc_err_cnt),
4555[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4556 CNTR_NORMAL,
4557 access_rx_dc_intf_parity_err_cnt),
4558[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4559 CNTR_NORMAL,
4560 access_rx_dma_csr_cor_err_cnt),
4561/* SendPioErrStatus */
4562[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_pio_pec_sop_head_parity_err_cnt),
4565[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_pio_pcc_sop_head_parity_err_cnt),
4568[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4569 0, 0, CNTR_NORMAL,
4570 access_pio_last_returned_cnt_parity_err_cnt),
4571[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4572 0, CNTR_NORMAL,
4573 access_pio_current_free_cnt_parity_err_cnt),
4574[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4575 CNTR_NORMAL,
4576 access_pio_reserved_31_err_cnt),
4577[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4578 CNTR_NORMAL,
4579 access_pio_reserved_30_err_cnt),
4580[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_pio_ppmc_sop_len_err_cnt),
4583[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_pio_ppmc_bqc_mem_parity_err_cnt),
4586[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_pio_vl_fifo_parity_err_cnt),
4589[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4590 CNTR_NORMAL,
4591 access_pio_vlf_sop_parity_err_cnt),
4592[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4593 CNTR_NORMAL,
4594 access_pio_vlf_v1_len_parity_err_cnt),
4595[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4596 CNTR_NORMAL,
4597 access_pio_block_qw_count_parity_err_cnt),
4598[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4599 CNTR_NORMAL,
4600 access_pio_write_qw_valid_parity_err_cnt),
4601[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4602 CNTR_NORMAL,
4603 access_pio_state_machine_err_cnt),
4604[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4605 CNTR_NORMAL,
4606 access_pio_write_data_parity_err_cnt),
4607[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4608 CNTR_NORMAL,
4609 access_pio_host_addr_mem_cor_err_cnt),
4610[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4611 CNTR_NORMAL,
4612 access_pio_host_addr_mem_unc_err_cnt),
4613[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4614 CNTR_NORMAL,
4615 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4616[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4617 CNTR_NORMAL,
4618 access_pio_init_sm_in_err_cnt),
4619[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4620 CNTR_NORMAL,
4621 access_pio_ppmc_pbl_fifo_err_cnt),
4622[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4623 0, CNTR_NORMAL,
4624 access_pio_credit_ret_fifo_parity_err_cnt),
4625[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4626 CNTR_NORMAL,
4627 access_pio_v1_len_mem_bank1_cor_err_cnt),
4628[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4629 CNTR_NORMAL,
4630 access_pio_v1_len_mem_bank0_cor_err_cnt),
4631[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4632 CNTR_NORMAL,
4633 access_pio_v1_len_mem_bank1_unc_err_cnt),
4634[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4635 CNTR_NORMAL,
4636 access_pio_v1_len_mem_bank0_unc_err_cnt),
4637[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4638 CNTR_NORMAL,
4639 access_pio_sm_pkt_reset_parity_err_cnt),
4640[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4641 CNTR_NORMAL,
4642 access_pio_pkt_evict_fifo_parity_err_cnt),
4643[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4644 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4647[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_pio_sbrdctl_crrel_parity_err_cnt),
4650[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_pio_pec_fifo_parity_err_cnt),
4653[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4654 CNTR_NORMAL,
4655 access_pio_pcc_fifo_parity_err_cnt),
4656[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4657 CNTR_NORMAL,
4658 access_pio_sb_mem_fifo1_err_cnt),
4659[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4660 CNTR_NORMAL,
4661 access_pio_sb_mem_fifo0_err_cnt),
4662[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_pio_csr_parity_err_cnt),
4665[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_pio_write_addr_parity_err_cnt),
4668[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_pio_write_bad_ctxt_err_cnt),
4671/* SendDmaErrStatus */
4672[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4673 0, CNTR_NORMAL,
4674 access_sdma_pcie_req_tracking_cor_err_cnt),
4675[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4676 0, CNTR_NORMAL,
4677 access_sdma_pcie_req_tracking_unc_err_cnt),
4678[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_sdma_csr_parity_err_cnt),
4681[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_sdma_rpy_tag_err_cnt),
4684/* SendEgressErrStatus */
4685[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4686 CNTR_NORMAL,
4687 access_tx_read_pio_memory_csr_unc_err_cnt),
4688[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4689 0, CNTR_NORMAL,
4690 access_tx_read_sdma_memory_csr_err_cnt),
4691[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4692 CNTR_NORMAL,
4693 access_tx_egress_fifo_cor_err_cnt),
4694[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4695 CNTR_NORMAL,
4696 access_tx_read_pio_memory_cor_err_cnt),
4697[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4698 CNTR_NORMAL,
4699 access_tx_read_sdma_memory_cor_err_cnt),
4700[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4701 CNTR_NORMAL,
4702 access_tx_sb_hdr_cor_err_cnt),
4703[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4704 CNTR_NORMAL,
4705 access_tx_credit_overrun_err_cnt),
4706[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4707 CNTR_NORMAL,
4708 access_tx_launch_fifo8_cor_err_cnt),
4709[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_tx_launch_fifo7_cor_err_cnt),
4712[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_tx_launch_fifo6_cor_err_cnt),
4715[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4716 CNTR_NORMAL,
4717 access_tx_launch_fifo5_cor_err_cnt),
4718[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4719 CNTR_NORMAL,
4720 access_tx_launch_fifo4_cor_err_cnt),
4721[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4722 CNTR_NORMAL,
4723 access_tx_launch_fifo3_cor_err_cnt),
4724[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4725 CNTR_NORMAL,
4726 access_tx_launch_fifo2_cor_err_cnt),
4727[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4728 CNTR_NORMAL,
4729 access_tx_launch_fifo1_cor_err_cnt),
4730[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4731 CNTR_NORMAL,
4732 access_tx_launch_fifo0_cor_err_cnt),
4733[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4734 CNTR_NORMAL,
4735 access_tx_credit_return_vl_err_cnt),
4736[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4737 CNTR_NORMAL,
4738 access_tx_hcrc_insertion_err_cnt),
4739[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4740 CNTR_NORMAL,
4741 access_tx_egress_fifo_unc_err_cnt),
4742[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4743 CNTR_NORMAL,
4744 access_tx_read_pio_memory_unc_err_cnt),
4745[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4746 CNTR_NORMAL,
4747 access_tx_read_sdma_memory_unc_err_cnt),
4748[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4749 CNTR_NORMAL,
4750 access_tx_sb_hdr_unc_err_cnt),
4751[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4752 CNTR_NORMAL,
4753 access_tx_credit_return_partiy_err_cnt),
4754[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4755 0, 0, CNTR_NORMAL,
4756 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4757[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4758 0, 0, CNTR_NORMAL,
4759 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4760[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4761 0, 0, CNTR_NORMAL,
4762 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4763[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4764 0, 0, CNTR_NORMAL,
4765 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4766[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4767 0, 0, CNTR_NORMAL,
4768 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4769[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4770 0, 0, CNTR_NORMAL,
4771 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4772[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4773 0, 0, CNTR_NORMAL,
4774 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4775[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4776 0, 0, CNTR_NORMAL,
4777 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4778[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4779 0, 0, CNTR_NORMAL,
4780 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4781[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4782 0, 0, CNTR_NORMAL,
4783 access_tx_sdma15_disallowed_packet_err_cnt),
4784[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4785 0, 0, CNTR_NORMAL,
4786 access_tx_sdma14_disallowed_packet_err_cnt),
4787[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4788 0, 0, CNTR_NORMAL,
4789 access_tx_sdma13_disallowed_packet_err_cnt),
4790[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4791 0, 0, CNTR_NORMAL,
4792 access_tx_sdma12_disallowed_packet_err_cnt),
4793[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4794 0, 0, CNTR_NORMAL,
4795 access_tx_sdma11_disallowed_packet_err_cnt),
4796[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4797 0, 0, CNTR_NORMAL,
4798 access_tx_sdma10_disallowed_packet_err_cnt),
4799[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4800 0, 0, CNTR_NORMAL,
4801 access_tx_sdma9_disallowed_packet_err_cnt),
4802[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4803 0, 0, CNTR_NORMAL,
4804 access_tx_sdma8_disallowed_packet_err_cnt),
4805[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4806 0, 0, CNTR_NORMAL,
4807 access_tx_sdma7_disallowed_packet_err_cnt),
4808[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4809 0, 0, CNTR_NORMAL,
4810 access_tx_sdma6_disallowed_packet_err_cnt),
4811[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4812 0, 0, CNTR_NORMAL,
4813 access_tx_sdma5_disallowed_packet_err_cnt),
4814[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4815 0, 0, CNTR_NORMAL,
4816 access_tx_sdma4_disallowed_packet_err_cnt),
4817[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4818 0, 0, CNTR_NORMAL,
4819 access_tx_sdma3_disallowed_packet_err_cnt),
4820[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4821 0, 0, CNTR_NORMAL,
4822 access_tx_sdma2_disallowed_packet_err_cnt),
4823[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4824 0, 0, CNTR_NORMAL,
4825 access_tx_sdma1_disallowed_packet_err_cnt),
4826[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4827 0, 0, CNTR_NORMAL,
4828 access_tx_sdma0_disallowed_packet_err_cnt),
4829[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4830 CNTR_NORMAL,
4831 access_tx_config_parity_err_cnt),
4832[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4833 CNTR_NORMAL,
4834 access_tx_sbrd_ctl_csr_parity_err_cnt),
4835[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4836 CNTR_NORMAL,
4837 access_tx_launch_csr_parity_err_cnt),
4838[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4839 CNTR_NORMAL,
4840 access_tx_illegal_vl_err_cnt),
4841[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4842 "TxSbrdCtlStateMachineParityErr", 0, 0,
4843 CNTR_NORMAL,
4844 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4845[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4846 CNTR_NORMAL,
4847 access_egress_reserved_10_err_cnt),
4848[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4849 CNTR_NORMAL,
4850 access_egress_reserved_9_err_cnt),
4851[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4852 0, 0, CNTR_NORMAL,
4853 access_tx_sdma_launch_intf_parity_err_cnt),
4854[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4855 CNTR_NORMAL,
4856 access_tx_pio_launch_intf_parity_err_cnt),
4857[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4858 CNTR_NORMAL,
4859 access_egress_reserved_6_err_cnt),
4860[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_tx_incorrect_link_state_err_cnt),
4863[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4864 CNTR_NORMAL,
4865 access_tx_linkdown_err_cnt),
4866[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4867 "EgressFifoUnderrunOrParityErr", 0, 0,
4868 CNTR_NORMAL,
4869 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4870[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4871 CNTR_NORMAL,
4872 access_egress_reserved_2_err_cnt),
4873[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4874 CNTR_NORMAL,
4875 access_tx_pkt_integrity_mem_unc_err_cnt),
4876[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4877 CNTR_NORMAL,
4878 access_tx_pkt_integrity_mem_cor_err_cnt),
4879/* SendErrStatus */
4880[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4881 CNTR_NORMAL,
4882 access_send_csr_write_bad_addr_err_cnt),
4883[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4884 CNTR_NORMAL,
4885 access_send_csr_read_bad_addr_err_cnt),
4886[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4887 CNTR_NORMAL,
4888 access_send_csr_parity_cnt),
4889/* SendCtxtErrStatus */
4890[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4891 CNTR_NORMAL,
4892 access_pio_write_out_of_bounds_err_cnt),
4893[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4894 CNTR_NORMAL,
4895 access_pio_write_overflow_err_cnt),
4896[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4897 0, 0, CNTR_NORMAL,
4898 access_pio_write_crosses_boundary_err_cnt),
4899[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4900 CNTR_NORMAL,
4901 access_pio_disallowed_packet_err_cnt),
4902[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4903 CNTR_NORMAL,
4904 access_pio_inconsistent_sop_err_cnt),
4905/* SendDmaEngErrStatus */
4906[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4907 0, 0, CNTR_NORMAL,
4908 access_sdma_header_request_fifo_cor_err_cnt),
4909[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4910 CNTR_NORMAL,
4911 access_sdma_header_storage_cor_err_cnt),
4912[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4913 CNTR_NORMAL,
4914 access_sdma_packet_tracking_cor_err_cnt),
4915[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4916 CNTR_NORMAL,
4917 access_sdma_assembly_cor_err_cnt),
4918[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4919 CNTR_NORMAL,
4920 access_sdma_desc_table_cor_err_cnt),
4921[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4922 0, 0, CNTR_NORMAL,
4923 access_sdma_header_request_fifo_unc_err_cnt),
4924[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_sdma_header_storage_unc_err_cnt),
4927[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4928 CNTR_NORMAL,
4929 access_sdma_packet_tracking_unc_err_cnt),
4930[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_sdma_assembly_unc_err_cnt),
4933[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_sdma_desc_table_unc_err_cnt),
4936[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4937 CNTR_NORMAL,
4938 access_sdma_timeout_err_cnt),
4939[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4940 CNTR_NORMAL,
4941 access_sdma_header_length_err_cnt),
4942[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4943 CNTR_NORMAL,
4944 access_sdma_header_address_err_cnt),
4945[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4946 CNTR_NORMAL,
4947 access_sdma_header_select_err_cnt),
4948[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4949 CNTR_NORMAL,
4950 access_sdma_reserved_9_err_cnt),
4951[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4952 CNTR_NORMAL,
4953 access_sdma_packet_desc_overflow_err_cnt),
4954[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4955 CNTR_NORMAL,
4956 access_sdma_length_mismatch_err_cnt),
4957[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4958 CNTR_NORMAL,
4959 access_sdma_halt_err_cnt),
4960[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4961 CNTR_NORMAL,
4962 access_sdma_mem_read_err_cnt),
4963[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4964 CNTR_NORMAL,
4965 access_sdma_first_desc_err_cnt),
4966[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4967 CNTR_NORMAL,
4968 access_sdma_tail_out_of_bounds_err_cnt),
4969[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4970 CNTR_NORMAL,
4971 access_sdma_too_long_err_cnt),
4972[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4973 CNTR_NORMAL,
4974 access_sdma_gen_mismatch_err_cnt),
4975[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4976 CNTR_NORMAL,
4977 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004978};
4979
4980static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4981[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4982 CNTR_NORMAL),
4983[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4984 CNTR_NORMAL),
4985[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4986 CNTR_NORMAL),
4987[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4988 CNTR_NORMAL),
4989[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4990 CNTR_NORMAL),
4991[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4992 CNTR_NORMAL),
4993[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4994 CNTR_NORMAL),
4995[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4996[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4997[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4998[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004999 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005000[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005001 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005002[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005003 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005004[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5005[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5006[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005007 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005008[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005009 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05005010[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5011 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005012[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005013 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005014[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08005015 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5016 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005017[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005018 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005019[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005020 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005021[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5022[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5023[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5024[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5025[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5026[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5027[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5028[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5029[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5030[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5031[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5032[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5033[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5034 access_sw_cpu_rc_acks),
5035[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005036 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005037[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005038 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005039[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5040[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5041[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5042[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5043[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5044[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5045[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5046[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5047[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5048[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5049[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5050[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5051[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5052[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5053[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5054[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5055[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5056[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5057[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5058[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5059[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5060[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5061[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5062[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5063[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5064[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5065[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5066[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5067[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5068[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5069[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5070[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5071[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5072[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5073[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5074[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5075[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5076[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5077[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5078[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5079[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5080[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5081[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5082[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5083[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5084[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5085[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5086[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5087[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5088[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5089[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5090[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5091[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5092[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5093[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5094[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5095[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5096[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5097[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5098[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5099[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5100[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5101[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5102[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5103[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5104[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5105[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5106[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5107[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5108[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5109[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5110[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5111[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5112[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5113[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5114[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5115[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5116[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5117[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5118[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5119};
5120
5121/* ======================================================================== */
5122
Mike Marciniszyn77241052015-07-30 15:17:43 -04005123/* return true if this is chip revision a */
5124int is_ax(struct hfi1_devdata *dd)
5125{
5126 u8 chip_rev_minor =
5127 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5128 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5129 return (chip_rev_minor & 0xf0) == 0;
5130}
5131
5132/* return true if this is chip revision b */
5133int is_bx(struct hfi1_devdata *dd)
5134{
5135 u8 chip_rev_minor =
5136 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5137 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005138 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005139}
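
/*
 * Note (editorial, derived from the two checks above): the minor revision
 * field appears to carry the silicon step in its upper nibble, so A-step
 * parts report 0x0n and B-step parts report 0x1n.
 */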
5140
5141/*
5142 * Append string s to buffer buf. Arguments curp and lenp are the current
5143 * position and remaining length, respectively.
5144 *
5145 * return 0 on success, 1 on out of room
5146 */
5147static int append_str(char *buf, char **curp, int *lenp, const char *s)
5148{
5149 char *p = *curp;
5150 int len = *lenp;
5151 int result = 0; /* success */
5152 char c;
5153
5154 /* add a comma, if not the first string in the buffer */
5155 if (p != buf) {
5156 if (len == 0) {
5157 result = 1; /* out of room */
5158 goto done;
5159 }
5160 *p++ = ',';
5161 len--;
5162 }
5163
5164 /* copy the string */
5165 while ((c = *s++) != 0) {
5166 if (len == 0) {
5167 result = 1; /* out of room */
5168 goto done;
5169 }
5170 *p++ = c;
5171 len--;
5172 }
5173
5174done:
5175 /* write return values */
5176 *curp = p;
5177 *lenp = len;
5178
5179 return result;
5180}
5181
5182/*
5183 * Using the given flag table, print a comma separated string into
5184 * the buffer. End in '*' if the buffer is too short.
5185 */
5186static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005187 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005188{
5189 char extra[32];
5190 char *p = buf;
5191 int len = buf_len;
5192 int no_room = 0;
5193 int i;
5194
5195 /* make sure there are at least 2 bytes so we can form "*" */
5196 if (len < 2)
5197 return "";
5198
5199 len--; /* leave room for a nul */
5200 for (i = 0; i < table_size; i++) {
5201 if (flags & table[i].flag) {
5202 no_room = append_str(buf, &p, &len, table[i].str);
5203 if (no_room)
5204 break;
5205 flags &= ~table[i].flag;
5206 }
5207 }
5208
5209 /* any undocumented bits left? */
5210 if (!no_room && flags) {
5211 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5212 no_room = append_str(buf, &p, &len, extra);
5213 }
5214
5215 /* add * if ran out of room */
5216 if (no_room) {
5217 /* may need to back up to add space for a '*' */
5218 if (len == 0)
5219 --p;
5220 *p++ = '*';
5221 }
5222
5223 /* add final nul - space already allocated above */
5224 *p = 0;
5225 return buf;
5226}
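
/*
 * Example (sketch, mirroring the callers below): an error status value is
 * decoded through a thin wrapper such as cce_err_status_string(), e.g.:
 *
 *	char buf[96];
 *
 *	dd_dev_info(dd, "CCE Error: %s\n",
 *		    cce_err_status_string(buf, sizeof(buf), reg));
 *
 * A '*' is appended when the buffer is too small to hold all flag names.
 */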
5227
5228/* first 8 CCE error interrupt source names */
5229static const char * const cce_misc_names[] = {
5230 "CceErrInt", /* 0 */
5231 "RxeErrInt", /* 1 */
5232 "MiscErrInt", /* 2 */
5233 "Reserved3", /* 3 */
5234 "PioErrInt", /* 4 */
5235 "SDmaErrInt", /* 5 */
5236 "EgressErrInt", /* 6 */
5237 "TxeErrInt" /* 7 */
5238};
5239
5240/*
5241 * Return the miscellaneous error interrupt name.
5242 */
5243static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5244{
5245 if (source < ARRAY_SIZE(cce_misc_names))
5246 strncpy(buf, cce_misc_names[source], bsize);
5247 else
Jubin John17fb4f22016-02-14 20:21:52 -08005248 snprintf(buf, bsize, "Reserved%u",
5249 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005250
5251 return buf;
5252}
5253
5254/*
5255 * Return the SDMA engine error interrupt name.
5256 */
5257static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5258{
5259 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5260 return buf;
5261}
5262
5263/*
5264 * Return the send context error interrupt name.
5265 */
5266static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5267{
5268 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5269 return buf;
5270}
5271
5272static const char * const various_names[] = {
5273 "PbcInt",
5274 "GpioAssertInt",
5275 "Qsfp1Int",
5276 "Qsfp2Int",
5277 "TCritInt"
5278};
5279
5280/*
5281 * Return the various interrupt name.
5282 */
5283static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5284{
5285 if (source < ARRAY_SIZE(various_names))
5286 strncpy(buf, various_names[source], bsize);
5287 else
Jubin John8638b772016-02-14 20:19:24 -08005288 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005289 return buf;
5290}
5291
5292/*
5293 * Return the DC interrupt name.
5294 */
5295static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5296{
5297 static const char * const dc_int_names[] = {
5298 "common",
5299 "lcb",
5300 "8051",
5301 "lbm" /* local block merge */
5302 };
5303
5304 if (source < ARRAY_SIZE(dc_int_names))
5305 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5306 else
5307 snprintf(buf, bsize, "DCInt%u", source);
5308 return buf;
5309}
5310
5311static const char * const sdma_int_names[] = {
5312 "SDmaInt",
5313 "SdmaIdleInt",
5314 "SdmaProgressInt",
5315};
5316
5317/*
5318 * Return the SDMA engine interrupt name.
5319 */
5320static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5321{
5322 /* what interrupt */
5323 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5324 /* which engine */
5325 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5326
5327 if (likely(what < 3))
5328 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5329 else
5330 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5331 return buf;
5332}
5333
5334/*
5335 * Return the receive available interrupt name.
5336 */
5337static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5338{
5339 snprintf(buf, bsize, "RcvAvailInt%u", source);
5340 return buf;
5341}
5342
5343/*
5344 * Return the receive urgent interrupt name.
5345 */
5346static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5347{
5348 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5349 return buf;
5350}
5351
5352/*
5353 * Return the send credit interrupt name.
5354 */
5355static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5356{
5357 snprintf(buf, bsize, "SendCreditInt%u", source);
5358 return buf;
5359}
5360
5361/*
5362 * Return the reserved interrupt name.
5363 */
5364static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5365{
5366 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5367 return buf;
5368}
5369
5370static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5371{
5372 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005373 cce_err_status_flags,
5374 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005375}
5376
5377static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5378{
5379 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005380 rxe_err_status_flags,
5381 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005382}
5383
5384static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5385{
5386 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005387 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005388}
5389
5390static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5391{
5392 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005393 pio_err_status_flags,
5394 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005395}
5396
5397static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5398{
5399 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005400 sdma_err_status_flags,
5401 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005402}
5403
5404static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5405{
5406 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005407 egress_err_status_flags,
5408 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005409}
5410
5411static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5412{
5413 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005414 egress_err_info_flags,
5415 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005416}
5417
5418static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5419{
5420 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005421 send_err_status_flags,
5422 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005423}
5424
5425static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5426{
5427 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005428 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005429
5430 /*
5431 * For most of these errors, there is nothing that can be done except
5432 * report or record it.
5433 */
5434 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005435 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005436
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005437 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5438 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005439 /* this error requires a manual drop into SPC freeze mode */
5440 /* then a fix up */
5441 start_freeze_handling(dd->pport, FREEZE_SELF);
5442 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005443
5444 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5445 if (reg & (1ull << i)) {
5446 incr_cntr64(&dd->cce_err_status_cnt[i]);
5447 /* maintain a counter over all cce_err_status errors */
5448 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5449 }
5450 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005451}
5452
5453/*
5454 * Check counters for receive errors that do not have an interrupt
5455 * associated with them.
5456 */
5457#define RCVERR_CHECK_TIME 10 /* time between checks, in seconds */
5458static void update_rcverr_timer(unsigned long opaque)
5459{
5460 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5461 struct hfi1_pportdata *ppd = dd->pport;
5462 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5463
5464 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005465 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005466 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005467 set_link_down_reason(
5468 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5469 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005470 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5471 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005472 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005473
5474 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5475}
5476
5477static int init_rcverr(struct hfi1_devdata *dd)
5478{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305479 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005480 /* Assume the hardware counter has been reset */
5481 dd->rcv_ovfl_cnt = 0;
5482 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5483}
5484
5485static void free_rcverr(struct hfi1_devdata *dd)
5486{
5487 if (dd->rcverr_timer.data)
5488 del_timer_sync(&dd->rcverr_timer);
5489 dd->rcverr_timer.data = 0;
5490}
5491
5492static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5493{
5494 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005495 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005496
5497 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005498 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005499
5500 if (reg & ALL_RXE_FREEZE_ERR) {
5501 int flags = 0;
5502
5503 /*
5504 * Freeze mode recovery is disabled for the errors
5505 * in RXE_FREEZE_ABORT_MASK
5506 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005507 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005508 flags = FREEZE_ABORT;
5509
5510 start_freeze_handling(dd->pport, flags);
5511 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005512
5513 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5514 if (reg & (1ull << i))
5515 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5516 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005517}
5518
5519static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5520{
5521 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005522 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005523
5524 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005525 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005526 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5527 if (reg & (1ull << i))
5528 incr_cntr64(&dd->misc_err_status_cnt[i]);
5529 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530}
5531
5532static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5533{
5534 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005535 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005536
5537 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005538 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005539
5540 if (reg & ALL_PIO_FREEZE_ERR)
5541 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005542
5543 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5544 if (reg & (1ull << i))
5545 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5546 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005547}
5548
5549static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5550{
5551 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005552 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005553
5554 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005555 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005556
5557 if (reg & ALL_SDMA_FREEZE_ERR)
5558 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005559
5560 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5561 if (reg & (1ull << i))
5562 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5563 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005564}
5565
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005566static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5567{
5568 incr_cntr64(&ppd->port_xmit_discards);
5569}
5570
Mike Marciniszyn77241052015-07-30 15:17:43 -04005571static void count_port_inactive(struct hfi1_devdata *dd)
5572{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005573 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005574}
5575
5576/*
5577 * We have had a "disallowed packet" error during egress. Determine the
5578 * integrity check which failed, and update the relevant error counters, etc.
5579 *
5580 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5581 * bit of state per integrity check, and so we can miss the reason for an
5582 * egress error if more than one packet fails the same integrity check
5583 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5584 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005585static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5586 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005587{
5588 struct hfi1_pportdata *ppd = dd->pport;
5589 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5590 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5591 char buf[96];
5592
5593 /* clear down all observed info as quickly as possible after read */
5594 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5595
5596 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005597 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5598 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005599
5600 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005601 if (info & PORT_DISCARD_EGRESS_ERRS) {
5602 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005603
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005604 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005605 * Count all applicable bits as individual errors and
5606 * attribute them to the packet that triggered this handler.
5607 * This may not be completely accurate due to limitations
5608 * on the available hardware error information. There is
5609 * a single information register and any number of error
5610 * packets may have occurred and contributed to it before
5611 * this routine is called. This means that:
5612 * a) If multiple packets with the same error occur before
5613 * this routine is called, earlier packets are missed.
5614 * There is only a single bit for each error type.
5615 * b) Errors may not be attributed to the correct VL.
5616 * The driver is attributing all bits in the info register
5617 * to the packet that triggered this call, but bits
5618 * could be an accumulation of different packets with
5619 * different VLs.
5620 * c) A single error packet may have multiple counts attached
5621 * to it. There is no way for the driver to know if
5622 * multiple bits set in the info register are due to a
5623 * single packet or multiple packets. The driver assumes
5624 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005625 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005626 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005627 for (i = 0; i < weight; i++) {
5628 __count_port_discards(ppd);
5629 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5630 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5631 else if (vl == 15)
5632 incr_cntr64(&ppd->port_xmit_discards_vl
5633 [C_VL_15]);
5634 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005635 }
5636}
5637
5638/*
5639 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5640 * register. Does it represent a 'port inactive' error?
5641 */
5642static inline int port_inactive_err(u64 posn)
5643{
5644 return (posn >= SEES(TX_LINKDOWN) &&
5645 posn <= SEES(TX_INCORRECT_LINK_STATE));
5646}
5647
5648/*
5649 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5650 * register. Does it represent a 'disallowed packet' error?
5651 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005652static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005653{
5654 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5655 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5656}
5657
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005658/*
5659 * Input value is a bit position of one of the SDMA engine disallowed
5660 * packet errors. Return which engine. Use of this must be guarded by
5661 * disallowed_pkt_err().
5662 */
5663static inline int disallowed_pkt_engine(int posn)
5664{
5665 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5666}
5667
5668/*
5669 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5670 * be done.
5671 */
5672static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5673{
5674 struct sdma_vl_map *m;
5675 int vl;
5676
5677 /* range check */
5678 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5679 return -1;
5680
5681 rcu_read_lock();
5682 m = rcu_dereference(dd->sdma_map);
5683 vl = m->engine_to_vl[engine];
5684 rcu_read_unlock();
5685
5686 return vl;
5687}
5688
5689/*
5690 * Translate the send context (software index) into a VL. Return -1 if the
5691 * translation cannot be done.
5692 */
5693static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5694{
5695 struct send_context_info *sci;
5696 struct send_context *sc;
5697 int i;
5698
5699 sci = &dd->send_contexts[sw_index];
5700
5701 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005702 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005703 return -1;
5704
5705 sc = sci->sc;
5706 if (!sc)
5707 return -1;
5708 if (dd->vld[15].sc == sc)
5709 return 15;
5710 for (i = 0; i < num_vls; i++)
5711 if (dd->vld[i].sc == sc)
5712 return i;
5713
5714 return -1;
5715}
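
/*
 * Usage note: engine_to_vl() and sc_to_vl() let the egress "disallowed
 * packet" handlers below (handle_egress_err() and is_sendctxt_err_int())
 * attribute transmit discards to a VL; a return of -1 simply means the
 * discard is counted against the port but not against any particular VL.
 */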
5716
Mike Marciniszyn77241052015-07-30 15:17:43 -04005717static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5718{
5719 u64 reg_copy = reg, handled = 0;
5720 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005721 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005722
5723 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5724 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005725 else if (is_ax(dd) &&
5726 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5727 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005728 start_freeze_handling(dd->pport, 0);
5729
5730 while (reg_copy) {
5731 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005732 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005733 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005734 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005735
5736 if (port_inactive_err(shift)) {
5737 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005738 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005739 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005740 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5741
5742 handle_send_egress_err_info(dd, vl);
5743 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005744 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005745 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005746 }
5747
5748 reg &= ~handled;
5749
5750 if (reg)
5751 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005752 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005753
5754 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5755 if (reg & (1ull << i))
5756 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5757 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005758}
5759
5760static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5761{
5762 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005763 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005764
5765 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005766 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005767
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005768 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5769 if (reg & (1ull << i))
5770 incr_cntr64(&dd->send_err_status_cnt[i]);
5771 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005772}
5773
5774/*
5775 * The maximum number of times the error clear down will loop before
5776 * blocking a repeating error. This value is arbitrary.
5777 */
5778#define MAX_CLEAR_COUNT 20
5779
5780/*
5781 * Clear and handle an error register. All error interrupts are funneled
5782 * through here to have a central location to correctly handle single-
5783 * or multi-shot errors.
5784 *
5785 * For non per-context registers, call this routine with a context value
5786 * of 0 so the per-context offset is zero.
5787 *
5788 * If the handler loops too many times, assume that something is wrong
5789 * and can't be fixed, so mask the error bits.
5790 */
5791static void interrupt_clear_down(struct hfi1_devdata *dd,
5792 u32 context,
5793 const struct err_reg_info *eri)
5794{
5795 u64 reg;
5796 u32 count;
5797
5798 /* read in a loop until no more errors are seen */
5799 count = 0;
5800 while (1) {
5801 reg = read_kctxt_csr(dd, context, eri->status);
5802 if (reg == 0)
5803 break;
5804 write_kctxt_csr(dd, context, eri->clear, reg);
5805 if (likely(eri->handler))
5806 eri->handler(dd, context, reg);
5807 count++;
5808 if (count > MAX_CLEAR_COUNT) {
5809 u64 mask;
5810
5811 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005812 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005813 /*
5814 * Read-modify-write so any other masked bits
5815 * remain masked.
5816 */
5817 mask = read_kctxt_csr(dd, context, eri->mask);
5818 mask &= ~reg;
5819 write_kctxt_csr(dd, context, eri->mask, mask);
5820 break;
5821 }
5822 }
5823}
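
/*
 * Example (sketch): the second tier error handlers call this with their
 * err_reg_info entry, as is_misc_err_int() below effectively does:
 *
 *	interrupt_clear_down(dd, 0, &misc_errs[source]);
 *
 * Per-context sources (send contexts, SDMA engines) pass the context or
 * engine number instead of 0 so the per-context CSR offset is applied.
 */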
5824
5825/*
5826 * CCE block "misc" interrupt. Source is < 16.
5827 */
5828static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5829{
5830 const struct err_reg_info *eri = &misc_errs[source];
5831
5832 if (eri->handler) {
5833 interrupt_clear_down(dd, 0, eri);
5834 } else {
5835 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005836 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005837 }
5838}
5839
5840static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5841{
5842 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005843 sc_err_status_flags,
5844 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005845}
5846
5847/*
5848 * Send context error interrupt. Source (hw_context) is < 160.
5849 *
5850 * All send context errors cause the send context to halt. The normal
5851 * clear-down mechanism cannot be used because we cannot clear the
5852 * error bits until several other long-running items are done first.
5853 * This is OK because with the context halted, nothing else is going
5854 * to happen on it anyway.
5855 */
5856static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5857 unsigned int hw_context)
5858{
5859 struct send_context_info *sci;
5860 struct send_context *sc;
5861 char flags[96];
5862 u64 status;
5863 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005864 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005865
5866 sw_index = dd->hw_to_sw[hw_context];
5867 if (sw_index >= dd->num_send_contexts) {
5868 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005869 "out of range sw index %u for send context %u\n",
5870 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005871 return;
5872 }
5873 sci = &dd->send_contexts[sw_index];
5874 sc = sci->sc;
5875 if (!sc) {
5876 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005877 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005878 return;
5879 }
5880
5881 /* tell the software that a halt has begun */
5882 sc_stop(sc, SCF_HALTED);
5883
5884 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5885
5886 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005887 send_context_err_status_string(flags, sizeof(flags),
5888 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005889
5890 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005891 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005892
5893 /*
5894 * Automatically restart halted kernel contexts out of interrupt
5895 * context. User contexts must ask the driver to restart the context.
5896 */
5897 if (sc->type != SC_USER)
5898 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005899
5900 /*
5901 * Update the counters for the corresponding status bits.
5902 * Note that these particular counters are aggregated over all
5903 * 160 contexts.
5904 */
5905 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5906 if (status & (1ull << i))
5907 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5908 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005909}
5910
5911static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5912 unsigned int source, u64 status)
5913{
5914 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005915 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005916
5917 sde = &dd->per_sdma[source];
5918#ifdef CONFIG_SDMA_VERBOSITY
5919 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5920 slashstrip(__FILE__), __LINE__, __func__);
5921 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5922 sde->this_idx, source, (unsigned long long)status);
5923#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005924 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005925 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005926
5927 /*
5928 * Update the counters for the corresponding status bits.
5929 * Note that these particular counters are aggregated over
5930 * all 16 DMA engines.
5931 */
5932 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5933 if (status & (1ull << i))
5934 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5935 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005936}
5937
5938/*
5939 * CCE block SDMA error interrupt. Source is < 16.
5940 */
5941static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5942{
5943#ifdef CONFIG_SDMA_VERBOSITY
5944 struct sdma_engine *sde = &dd->per_sdma[source];
5945
5946 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5947 slashstrip(__FILE__), __LINE__, __func__);
5948 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5949 source);
5950 sdma_dumpstate(sde);
5951#endif
5952 interrupt_clear_down(dd, source, &sdma_eng_err);
5953}
5954
5955/*
5956 * CCE block "various" interrupt. Source is < 8.
5957 */
5958static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5959{
5960 const struct err_reg_info *eri = &various_err[source];
5961
5962 /*
5963 * TCritInt cannot go through interrupt_clear_down()
5964 * because it is not a second tier interrupt. The handler
5965 * should be called directly.
5966 */
5967 if (source == TCRIT_INT_SOURCE)
5968 handle_temp_err(dd);
5969 else if (eri->handler)
5970 interrupt_clear_down(dd, 0, eri);
5971 else
5972 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005973 "%s: Unimplemented/reserved interrupt %d\n",
5974 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005975}
5976
5977static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5978{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005979 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005980 struct hfi1_pportdata *ppd = dd->pport;
5981 unsigned long flags;
5982 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5983
5984 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005985 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005986 dd_dev_info(dd, "%s: QSFP module removed\n",
5987 __func__);
5988
Mike Marciniszyn77241052015-07-30 15:17:43 -04005989 ppd->driver_link_ready = 0;
5990 /*
5991 * Cable removed, reset all our information about the
5992 * cache and cable capabilities
5993 */
5994
5995 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5996 /*
5997 * We don't set cache_refresh_required here as we expect
5998 * an interrupt when a cable is inserted
5999 */
6000 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006001 ppd->qsfp_info.reset_needed = 0;
6002 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006003 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006004 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006005 /* Invert the ModPresent pin now to detect plug-in */
6006 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6007 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006008
6009 if ((ppd->offline_disabled_reason >
6010 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006011 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006012 (ppd->offline_disabled_reason ==
6013 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6014 ppd->offline_disabled_reason =
6015 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006016 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006017
Mike Marciniszyn77241052015-07-30 15:17:43 -04006018 if (ppd->host_link_state == HLS_DN_POLL) {
6019 /*
6020 * The link is still in POLL. This means
6021 * that the normal link down processing
6022 * will not happen. We have to do it here
6023 * before turning the DC off.
6024 */
6025 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6026 }
6027 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006028 dd_dev_info(dd, "%s: QSFP module inserted\n",
6029 __func__);
6030
Mike Marciniszyn77241052015-07-30 15:17:43 -04006031 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6032 ppd->qsfp_info.cache_valid = 0;
6033 ppd->qsfp_info.cache_refresh_required = 1;
6034 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006035 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006036
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006037 /*
6038 * Stop inversion of ModPresent pin to detect
6039 * removal of the cable
6040 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006041 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006042 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6043 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6044
6045 ppd->offline_disabled_reason =
6046 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006047 }
6048 }
6049
6050 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006051 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006052 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006053 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6054 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006055 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6056 }
6057
6058 /* Schedule the QSFP work only if there is a cable attached. */
6059 if (qsfp_mod_present(ppd))
6060 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6061}
6062
6063static int request_host_lcb_access(struct hfi1_devdata *dd)
6064{
6065 int ret;
6066
6067 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006068 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6069 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006070 if (ret != HCMD_SUCCESS) {
6071 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006072 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006073 }
6074 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6075}
6076
6077static int request_8051_lcb_access(struct hfi1_devdata *dd)
6078{
6079 int ret;
6080
6081 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006082 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6083 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006084 if (ret != HCMD_SUCCESS) {
6085 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006086 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006087 }
6088 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6089}
6090
6091/*
6092 * Set the LCB selector - allow host access. The DCC selector always
6093 * points to the host.
6094 */
6095static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6096{
6097 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006098 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6099 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006100}
6101
6102/*
6103 * Clear the LCB selector - allow 8051 access. The DCC selector always
6104 * points to the host.
6105 */
6106static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6107{
6108 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006109 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006110}
6111
6112/*
6113 * Acquire LCB access from the 8051. If the host already has access,
6114 * just increment a counter. Otherwise, inform the 8051 that the
6115 * host is taking access.
6116 *
6117 * Returns:
6118 * 0 on success
6119 * -EBUSY if the 8051 has control and cannot be disturbed
6120 * -errno if unable to acquire access from the 8051
6121 */
6122int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6123{
6124 struct hfi1_pportdata *ppd = dd->pport;
6125 int ret = 0;
6126
6127 /*
6128 * Use the host link state lock so the operation of this routine
6129 * { link state check, selector change, count increment } can occur
6130 * as a unit against a link state change. Otherwise there is a
6131 * race between the state change and the count increment.
6132 */
6133 if (sleep_ok) {
6134 mutex_lock(&ppd->hls_lock);
6135 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006136 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006137 udelay(1);
6138 }
6139
6140 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006141 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006142 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006143 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006144 ret = -EBUSY;
6145 goto done;
6146 }
6147
6148 if (dd->lcb_access_count == 0) {
6149 ret = request_host_lcb_access(dd);
6150 if (ret) {
6151 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006152 "%s: unable to acquire LCB access, err %d\n",
6153 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006154 goto done;
6155 }
6156 set_host_lcb_access(dd);
6157 }
6158 dd->lcb_access_count++;
6159done:
6160 mutex_unlock(&ppd->hls_lock);
6161 return ret;
6162}
6163
6164/*
6165 * Release LCB access by decrementing the use count. If the count is moving
6166 * from 1 to 0, inform 8051 that it has control back.
6167 *
6168 * Returns:
6169 * 0 on success
6170 * -errno if unable to release access to the 8051
6171 */
6172int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6173{
6174 int ret = 0;
6175
6176 /*
6177 * Use the host link state lock because the acquire needed it.
6178 * Here, we only need to keep { selector change, count decrement }
6179 * as a unit.
6180 */
6181 if (sleep_ok) {
6182 mutex_lock(&dd->pport->hls_lock);
6183 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006184 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006185 udelay(1);
6186 }
6187
6188 if (dd->lcb_access_count == 0) {
6189 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006190 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006191 goto done;
6192 }
6193
6194 if (dd->lcb_access_count == 1) {
6195 set_8051_lcb_access(dd);
6196 ret = request_8051_lcb_access(dd);
6197 if (ret) {
6198 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006199 "%s: unable to release LCB access, err %d\n",
6200 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006201 /* restore host access if the grant didn't work */
6202 set_host_lcb_access(dd);
6203 goto done;
6204 }
6205 }
6206 dd->lcb_access_count--;
6207done:
6208 mutex_unlock(&dd->pport->hls_lock);
6209 return ret;
6210}
6211
6212/*
6213 * Initialize LCB access variables and state. Called during driver load,
6214 * after most of the initialization is finished.
6215 *
6216 * The DC default is LCB access on for the host. The driver defaults to
6217 * leaving access to the 8051. Assign access now - this constrains the call
6218 * to this routine to be after all LCB set-up is done. In particular, after
6219 * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6220 */
6221static void init_lcb_access(struct hfi1_devdata *dd)
6222{
6223 dd->lcb_access_count = 0;
6224}
6225
6226/*
6227 * Write a response back to a 8051 request.
6228 */
6229static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6230{
6231 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006232 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6233 (u64)return_code <<
6234 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6235 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006236}
6237
6238/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006239 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006240 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006241static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006242{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006243 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006244 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006245 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006246 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006247
6248 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6249 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6250 return; /* no request */
6251
6252 /* zero out COMPLETED so the response is seen */
6253 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6254
6255 /* extract request details */
6256 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6257 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6258 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6259 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6260
6261 switch (type) {
6262 case HREQ_LOAD_CONFIG:
6263 case HREQ_SAVE_CONFIG:
6264 case HREQ_READ_CONFIG:
6265 case HREQ_SET_TX_EQ_ABS:
6266 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006267 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006268 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006269 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006270 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6271 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006272 case HREQ_CONFIG_DONE:
6273 hreq_response(dd, HREQ_SUCCESS, 0);
6274 break;
6275
6276 case HREQ_INTERFACE_TEST:
6277 hreq_response(dd, HREQ_SUCCESS, data);
6278 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006279 default:
6280 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6281 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6282 break;
6283 }
6284}
6285
6286static void write_global_credit(struct hfi1_devdata *dd,
6287 u8 vau, u16 total, u16 shared)
6288{
6289 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006290 ((u64)total <<
6291 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6292 ((u64)shared <<
6293 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6294 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006295}
6296
6297/*
6298 * Set up initial VL15 credits of the remote. Assumes the rest of
 6299 * the CM credit registers are zero from a previous global or credit reset.
6300 */
6301void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6302{
6303 /* leave shared count at zero for both global and VL15 */
6304 write_global_credit(dd, vau, vl15buf, 0);
6305
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006306 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6307 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006308}
6309
6310/*
6311 * Zero all credit details from the previous connection and
6312 * reset the CM manager's internal counters.
6313 */
6314void reset_link_credits(struct hfi1_devdata *dd)
6315{
6316 int i;
6317
6318 /* remove all previous VL credit limits */
6319 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006320 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006321 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6322 write_global_credit(dd, 0, 0, 0);
6323 /* reset the CM block */
6324 pio_send_control(dd, PSC_CM_RESET);
6325}
6326
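/*
 * Note: credit and allocation units are exchanged with the peer in
 * log2-encoded "virtual" form.  As the conversions below show,
 * CU = 2^vCU and AU = 8 * 2^vAU bytes, so vAU 0 is an 8-byte AU and
 * vAU 1 a 16-byte AU (this matches the Z-value discussion in
 * handle_verify_cap()).
 */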
6327/* convert a vCU to a CU */
6328static u32 vcu_to_cu(u8 vcu)
6329{
6330 return 1 << vcu;
6331}
6332
6333/* convert a CU to a vCU */
6334static u8 cu_to_vcu(u32 cu)
6335{
6336 return ilog2(cu);
6337}
6338
6339/* convert a vAU to an AU */
6340static u32 vau_to_au(u8 vau)
6341{
6342 return 8 * (1 << vau);
6343}
6344
6345static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6346{
6347 ppd->sm_trap_qp = 0x0;
6348 ppd->sa_qp = 0x1;
6349}
6350
6351/*
6352 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6353 */
6354static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6355{
6356 u64 reg;
6357
6358 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6359 write_csr(dd, DC_LCB_CFG_RUN, 0);
6360 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6361 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006362 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006363 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6364 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6365 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006366 write_csr(dd, DCC_CFG_RESET, reg |
6367 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6368 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006369 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006370 if (!abort) {
6371 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6372 write_csr(dd, DCC_CFG_RESET, reg);
6373 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6374 }
6375}
6376
6377/*
6378 * This routine should be called after the link has been transitioned to
6379 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6380 * reset).
6381 *
6382 * The expectation is that the caller of this routine would have taken
6383 * care of properly transitioning the link into the correct state.
6384 */
6385static void dc_shutdown(struct hfi1_devdata *dd)
6386{
6387 unsigned long flags;
6388
6389 spin_lock_irqsave(&dd->dc8051_lock, flags);
6390 if (dd->dc_shutdown) {
6391 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6392 return;
6393 }
6394 dd->dc_shutdown = 1;
6395 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6396 /* Shutdown the LCB */
6397 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006398 /*
 6399 * Going to OFFLINE will have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006400 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006401 * itself.
6402 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006403 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6404}
6405
Jubin John4d114fd2016-02-14 20:21:43 -08006406/*
6407 * Calling this after the DC has been brought out of reset should not
6408 * do any damage.
6409 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006410static void dc_start(struct hfi1_devdata *dd)
6411{
6412 unsigned long flags;
6413 int ret;
6414
6415 spin_lock_irqsave(&dd->dc8051_lock, flags);
6416 if (!dd->dc_shutdown)
6417 goto done;
6418 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6419 /* Take the 8051 out of reset */
6420 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6421 /* Wait until 8051 is ready */
6422 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6423 if (ret) {
6424 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006425 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006426 }
6427 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6428 write_csr(dd, DCC_CFG_RESET, 0x10);
6429 /* lcb_shutdown() with abort=1 does not restore these */
6430 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6431 spin_lock_irqsave(&dd->dc8051_lock, flags);
6432 dd->dc_shutdown = 0;
6433done:
6434 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6435}
6436
6437/*
6438 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6439 */
6440static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6441{
6442 u64 rx_radr, tx_radr;
6443 u32 version;
6444
6445 if (dd->icode != ICODE_FPGA_EMULATION)
6446 return;
6447
6448 /*
6449 * These LCB defaults on emulator _s are good, nothing to do here:
6450 * LCB_CFG_TX_FIFOS_RADR
6451 * LCB_CFG_RX_FIFOS_RADR
6452 * LCB_CFG_LN_DCLK
6453 * LCB_CFG_IGNORE_LOST_RCLK
6454 */
6455 if (is_emulator_s(dd))
6456 return;
6457 /* else this is _p */
6458
6459 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006460 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006461 version = 0x2d; /* all B0 use 0x2d or higher settings */
6462
6463 if (version <= 0x12) {
6464 /* release 0x12 and below */
6465
6466 /*
6467 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6468 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6469 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6470 */
6471 rx_radr =
6472 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6473 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6474 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6475 /*
6476 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6477 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6478 */
6479 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6480 } else if (version <= 0x18) {
6481 /* release 0x13 up to 0x18 */
6482 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6483 rx_radr =
6484 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6485 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6486 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6487 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6488 } else if (version == 0x19) {
6489 /* release 0x19 */
6490 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6491 rx_radr =
6492 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6493 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6494 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6495 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6496 } else if (version == 0x1a) {
6497 /* release 0x1a */
6498 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6499 rx_radr =
6500 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6501 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6502 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6503 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6504 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6505 } else {
6506 /* release 0x1b and higher */
6507 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6508 rx_radr =
6509 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6510 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6511 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6512 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6513 }
6514
6515 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6516 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6517 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006518 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006519 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6520}
6521
6522/*
6523 * Handle a SMA idle message
6524 *
6525 * This is a work-queue function outside of the interrupt.
6526 */
6527void handle_sma_message(struct work_struct *work)
6528{
6529 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6530 sma_message_work);
6531 struct hfi1_devdata *dd = ppd->dd;
6532 u64 msg;
6533 int ret;
6534
Jubin John4d114fd2016-02-14 20:21:43 -08006535 /*
6536 * msg is bytes 1-4 of the 40-bit idle message - the command code
6537 * is stripped off
6538 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006539 ret = read_idle_sma(dd, &msg);
6540 if (ret)
6541 return;
6542 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6543 /*
6544 * React to the SMA message. Byte[1] (0 for us) is the command.
6545 */
6546 switch (msg & 0xff) {
6547 case SMA_IDLE_ARM:
6548 /*
6549 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6550 * State Transitions
6551 *
6552 * Only expected in INIT or ARMED, discard otherwise.
6553 */
6554 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6555 ppd->neighbor_normal = 1;
6556 break;
6557 case SMA_IDLE_ACTIVE:
6558 /*
6559 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6560 * State Transitions
6561 *
6562 * Can activate the node. Discard otherwise.
6563 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006564 if (ppd->host_link_state == HLS_UP_ARMED &&
6565 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006566 ppd->neighbor_normal = 1;
6567 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6568 if (ret)
6569 dd_dev_err(
6570 dd,
6571 "%s: received Active SMA idle message, couldn't set link to Active\n",
6572 __func__);
6573 }
6574 break;
6575 default:
6576 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006577 "%s: received unexpected SMA idle message 0x%llx\n",
6578 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006579 break;
6580 }
6581}
6582
6583static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6584{
6585 u64 rcvctrl;
6586 unsigned long flags;
6587
6588 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6589 rcvctrl = read_csr(dd, RCV_CTRL);
6590 rcvctrl |= add;
6591 rcvctrl &= ~clear;
6592 write_csr(dd, RCV_CTRL, rcvctrl);
6593 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6594}
6595
6596static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6597{
6598 adjust_rcvctrl(dd, add, 0);
6599}
6600
6601static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6602{
6603 adjust_rcvctrl(dd, 0, clear);
6604}
6605
6606/*
6607 * Called from all interrupt handlers to start handling an SPC freeze.
6608 */
6609void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6610{
6611 struct hfi1_devdata *dd = ppd->dd;
6612 struct send_context *sc;
6613 int i;
6614
6615 if (flags & FREEZE_SELF)
6616 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6617
6618 /* enter frozen mode */
6619 dd->flags |= HFI1_FROZEN;
6620
6621 /* notify all SDMA engines that they are going into a freeze */
6622 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6623
6624 /* do halt pre-handling on all enabled send contexts */
6625 for (i = 0; i < dd->num_send_contexts; i++) {
6626 sc = dd->send_contexts[i].sc;
6627 if (sc && (sc->flags & SCF_ENABLED))
6628 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6629 }
6630
6631 /* Send context are frozen. Notify user space */
6632 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6633
6634 if (flags & FREEZE_ABORT) {
6635 dd_dev_err(dd,
6636 "Aborted freeze recovery. Please REBOOT system\n");
6637 return;
6638 }
6639 /* queue non-interrupt handler */
6640 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6641}
6642
6643/*
6644 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6645 * depending on the "freeze" parameter.
6646 *
 6647 * No need to return an error if it times out; our only option
6648 * is to proceed anyway.
6649 */
6650static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6651{
6652 unsigned long timeout;
6653 u64 reg;
6654
6655 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6656 while (1) {
6657 reg = read_csr(dd, CCE_STATUS);
6658 if (freeze) {
6659 /* waiting until all indicators are set */
6660 if ((reg & ALL_FROZE) == ALL_FROZE)
6661 return; /* all done */
6662 } else {
6663 /* waiting until all indicators are clear */
6664 if ((reg & ALL_FROZE) == 0)
6665 return; /* all done */
6666 }
6667
6668 if (time_after(jiffies, timeout)) {
6669 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006670 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6671 freeze ? "" : "un", reg & ALL_FROZE,
6672 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006673 return;
6674 }
6675 usleep_range(80, 120);
6676 }
6677}
6678
6679/*
6680 * Do all freeze handling for the RXE block.
6681 */
6682static void rxe_freeze(struct hfi1_devdata *dd)
6683{
6684 int i;
6685
6686 /* disable port */
6687 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6688
6689 /* disable all receive contexts */
6690 for (i = 0; i < dd->num_rcv_contexts; i++)
6691 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6692}
6693
6694/*
6695 * Unfreeze handling for the RXE block - kernel contexts only.
6696 * This will also enable the port. User contexts will do unfreeze
6697 * handling on a per-context basis as they call into the driver.
6698 *
6699 */
6700static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6701{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006702 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006703 int i;
6704
6705 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006706 for (i = 0; i < dd->n_krcv_queues; i++) {
6707 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6708 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6709 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6710 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6711 hfi1_rcvctrl(dd, rcvmask, i);
6712 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006713
6714 /* enable port */
6715 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6716}
6717
6718/*
6719 * Non-interrupt SPC freeze handling.
6720 *
6721 * This is a work-queue function outside of the triggering interrupt.
6722 */
6723void handle_freeze(struct work_struct *work)
6724{
6725 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6726 freeze_work);
6727 struct hfi1_devdata *dd = ppd->dd;
6728
6729 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006730 wait_for_freeze_status(dd, 1);
6731
6732 /* SPC is now frozen */
6733
6734 /* do send PIO freeze steps */
6735 pio_freeze(dd);
6736
6737 /* do send DMA freeze steps */
6738 sdma_freeze(dd);
6739
6740 /* do send egress freeze steps - nothing to do */
6741
6742 /* do receive freeze steps */
6743 rxe_freeze(dd);
6744
6745 /*
6746 * Unfreeze the hardware - clear the freeze, wait for each
6747 * block's frozen bit to clear, then clear the frozen flag.
6748 */
6749 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6750 wait_for_freeze_status(dd, 0);
6751
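	/*
	 * A-step (Ax) silicon gets an extra freeze/unfreeze cycle below.
	 * This is assumed to be an early-silicon workaround; the specific
	 * erratum is not documented here.
	 */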
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006752 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006753 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6754 wait_for_freeze_status(dd, 1);
6755 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6756 wait_for_freeze_status(dd, 0);
6757 }
6758
6759 /* do send PIO unfreeze steps for kernel contexts */
6760 pio_kernel_unfreeze(dd);
6761
6762 /* do send DMA unfreeze steps */
6763 sdma_unfreeze(dd);
6764
6765 /* do send egress unfreeze steps - nothing to do */
6766
6767 /* do receive unfreeze steps for kernel contexts */
6768 rxe_kernel_unfreeze(dd);
6769
6770 /*
6771 * The unfreeze procedure touches global device registers when
6772 * it disables and re-enables RXE. Mark the device unfrozen
6773 * after all that is done so other parts of the driver waiting
6774 * for the device to unfreeze don't do things out of order.
6775 *
6776 * The above implies that the meaning of HFI1_FROZEN flag is
6777 * "Device has gone into freeze mode and freeze mode handling
6778 * is still in progress."
6779 *
6780 * The flag will be removed when freeze mode processing has
6781 * completed.
6782 */
6783 dd->flags &= ~HFI1_FROZEN;
6784 wake_up(&dd->event_queue);
6785
6786 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006787}
6788
6789/*
6790 * Handle a link up interrupt from the 8051.
6791 *
6792 * This is a work-queue function outside of the interrupt.
6793 */
6794void handle_link_up(struct work_struct *work)
6795{
6796 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006797 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006798 set_link_state(ppd, HLS_UP_INIT);
6799
6800 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6801 read_ltp_rtt(ppd->dd);
6802 /*
6803 * OPA specifies that certain counters are cleared on a transition
6804 * to link up, so do that.
6805 */
6806 clear_linkup_counters(ppd->dd);
6807 /*
6808 * And (re)set link up default values.
6809 */
6810 set_linkup_defaults(ppd);
6811
6812 /* enforce link speed enabled */
6813 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6814 /* oops - current speed is not enabled, bounce */
6815 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006816 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6817 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006818 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006819 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006820 set_link_state(ppd, HLS_DN_OFFLINE);
6821 start_link(ppd);
6822 }
6823}
6824
Jubin John4d114fd2016-02-14 20:21:43 -08006825/*
6826 * Several pieces of LNI information were cached for SMA in ppd.
6827 * Reset these on link down
6828 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006829static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6830{
6831 ppd->neighbor_guid = 0;
6832 ppd->neighbor_port_number = 0;
6833 ppd->neighbor_type = 0;
6834 ppd->neighbor_fm_security = 0;
6835}
6836
Dean Luickfeb831d2016-04-14 08:31:36 -07006837static const char * const link_down_reason_strs[] = {
6838 [OPA_LINKDOWN_REASON_NONE] = "None",
 6839 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6840 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6841 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6842 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6843 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6844 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6845 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6846 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6848 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6849 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6850 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6851 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6852 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6853 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6854 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6855 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6856 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6857 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6858 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6859 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6860 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6861 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6862 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6863 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6864 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6865 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6866 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6867 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6868 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6869 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6870 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6871 "Excessive buffer overrun",
6872 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6873 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6874 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6875 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6876 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6877 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6878 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6879 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6880 "Local media not installed",
6881 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6882 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6883 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6884 "End to end not installed",
6885 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6886 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6887 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6888 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6889 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6890 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6891};
6892
6893/* return the neighbor link down reason string */
6894static const char *link_down_reason_str(u8 reason)
6895{
6896 const char *str = NULL;
6897
6898 if (reason < ARRAY_SIZE(link_down_reason_strs))
6899 str = link_down_reason_strs[reason];
6900 if (!str)
6901 str = "(invalid)";
6902
6903 return str;
6904}
6905
Mike Marciniszyn77241052015-07-30 15:17:43 -04006906/*
6907 * Handle a link down interrupt from the 8051.
6908 *
6909 * This is a work-queue function outside of the interrupt.
6910 */
6911void handle_link_down(struct work_struct *work)
6912{
6913 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006914 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006915 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006916 link_down_work);
6917 int was_up;
6918 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006919
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006920 if ((ppd->host_link_state &
6921 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6922 ppd->port_type == PORT_TYPE_FIXED)
6923 ppd->offline_disabled_reason =
6924 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6925
6926 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006927 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006928 set_link_state(ppd, HLS_DN_OFFLINE);
6929
Dean Luickfeb831d2016-04-14 08:31:36 -07006930 if (was_up) {
6931 lcl_reason = 0;
6932 /* link down reason is only valid if the link was up */
6933 read_link_down_reason(ppd->dd, &link_down_reason);
6934 switch (link_down_reason) {
6935 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6936 /* the link went down, no idle message reason */
6937 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6938 ldr_str);
6939 break;
6940 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6941 /*
6942 * The neighbor reason is only valid if an idle message
6943 * was received for it.
6944 */
6945 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6946 dd_dev_info(ppd->dd,
6947 "%sNeighbor link down message %d, %s\n",
6948 ldr_str, neigh_reason,
6949 link_down_reason_str(neigh_reason));
6950 break;
6951 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6952 dd_dev_info(ppd->dd,
6953 "%sHost requested link to go offline\n",
6954 ldr_str);
6955 break;
6956 default:
6957 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6958 ldr_str, link_down_reason);
6959 break;
6960 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006961
Dean Luickfeb831d2016-04-14 08:31:36 -07006962 /*
6963 * If no reason, assume peer-initiated but missed
6964 * LinkGoingDown idle flits.
6965 */
6966 if (neigh_reason == 0)
6967 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6968 } else {
6969 /* went down while polling or going up */
6970 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6971 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006972
6973 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6974
Dean Luick015e91f2016-04-14 08:31:42 -07006975 /* inform the SMA when the link transitions from up to down */
6976 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6977 ppd->neigh_link_down_reason.sma == 0) {
6978 ppd->local_link_down_reason.sma =
6979 ppd->local_link_down_reason.latest;
6980 ppd->neigh_link_down_reason.sma =
6981 ppd->neigh_link_down_reason.latest;
6982 }
6983
Mike Marciniszyn77241052015-07-30 15:17:43 -04006984 reset_neighbor_info(ppd);
6985
6986 /* disable the port */
6987 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6988
Jubin John4d114fd2016-02-14 20:21:43 -08006989 /*
6990 * If there is no cable attached, turn the DC off. Otherwise,
6991 * start the link bring up.
6992 */
Dean Luick0db9dec2016-09-06 04:35:20 -07006993 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006994 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07006995 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04006996 start_link(ppd);
6997}
6998
6999void handle_link_bounce(struct work_struct *work)
7000{
7001 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7002 link_bounce_work);
7003
7004 /*
7005 * Only do something if the link is currently up.
7006 */
7007 if (ppd->host_link_state & HLS_UP) {
7008 set_link_state(ppd, HLS_DN_OFFLINE);
7009 start_link(ppd);
7010 } else {
7011 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007012 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007013 }
7014}
7015
7016/*
7017 * Mask conversion: Capability exchange to Port LTP. The capability
7018 * exchange has an implicit 16b CRC that is mandatory.
7019 */
7020static int cap_to_port_ltp(int cap)
7021{
7022 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7023
7024 if (cap & CAP_CRC_14B)
7025 port_ltp |= PORT_LTP_CRC_MODE_14;
7026 if (cap & CAP_CRC_48B)
7027 port_ltp |= PORT_LTP_CRC_MODE_48;
7028 if (cap & CAP_CRC_12B_16B_PER_LANE)
7029 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7030
7031 return port_ltp;
7032}
7033
7034/*
7035 * Convert an OPA Port LTP mask to capability mask
7036 */
7037int port_ltp_to_cap(int port_ltp)
7038{
7039 int cap_mask = 0;
7040
7041 if (port_ltp & PORT_LTP_CRC_MODE_14)
7042 cap_mask |= CAP_CRC_14B;
7043 if (port_ltp & PORT_LTP_CRC_MODE_48)
7044 cap_mask |= CAP_CRC_48B;
7045 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7046 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7047
7048 return cap_mask;
7049}
7050
7051/*
7052 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7053 */
7054static int lcb_to_port_ltp(int lcb_crc)
7055{
7056 int port_ltp = 0;
7057
7058 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7059 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7060 else if (lcb_crc == LCB_CRC_48B)
7061 port_ltp = PORT_LTP_CRC_MODE_48;
7062 else if (lcb_crc == LCB_CRC_14B)
7063 port_ltp = PORT_LTP_CRC_MODE_14;
7064 else
7065 port_ltp = PORT_LTP_CRC_MODE_16;
7066
7067 return port_ltp;
7068}
7069
7070/*
7071 * Our neighbor has indicated that we are allowed to act as a fabric
 7072 * manager, so place the full management partition key in pkey array
 7073 * index 2 (0-based; see OPAv1, section 20.2.2.6.8). Note
7074 * that we should already have the limited management partition key in
7075 * array element 1, and also that the port is not yet up when
7076 * add_full_mgmt_pkey() is invoked.
7077 */
7078static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7079{
7080 struct hfi1_devdata *dd = ppd->dd;
7081
Dean Luick87645222015-12-01 15:38:21 -05007082 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7083 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7084 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7085 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007086 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7087 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007088 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007089}
7090
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007091static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007092{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007093 if (ppd->pkeys[2] != 0) {
7094 ppd->pkeys[2] = 0;
7095 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007096 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007097 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007098}
7099
Mike Marciniszyn77241052015-07-30 15:17:43 -04007100/*
7101 * Convert the given link width to the OPA link width bitmask.
7102 */
7103static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7104{
7105 switch (width) {
7106 case 0:
7107 /*
7108 * Simulator and quick linkup do not set the width.
7109 * Just set it to 4x without complaint.
7110 */
7111 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7112 return OPA_LINK_WIDTH_4X;
7113 return 0; /* no lanes up */
7114 case 1: return OPA_LINK_WIDTH_1X;
7115 case 2: return OPA_LINK_WIDTH_2X;
7116 case 3: return OPA_LINK_WIDTH_3X;
7117 default:
7118 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007119 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007120 /* fall through */
7121 case 4: return OPA_LINK_WIDTH_4X;
7122 }
7123}
7124
7125/*
7126 * Do a population count on the bottom nibble.
7127 */
7128static const u8 bit_counts[16] = {
7129 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7130};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007131
Mike Marciniszyn77241052015-07-30 15:17:43 -04007132static inline u8 nibble_to_count(u8 nibble)
7133{
7134 return bit_counts[nibble & 0xf];
7135}
7136
7137/*
7138 * Read the active lane information from the 8051 registers and return
7139 * their widths.
7140 *
7141 * Active lane information is found in these 8051 registers:
7142 * enable_lane_tx
7143 * enable_lane_rx
7144 */
7145static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7146 u16 *rx_width)
7147{
7148 u16 tx, rx;
7149 u8 enable_lane_rx;
7150 u8 enable_lane_tx;
7151 u8 tx_polarity_inversion;
7152 u8 rx_polarity_inversion;
7153 u8 max_rate;
7154
7155 /* read the active lanes */
7156 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007157 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007158 read_local_lni(dd, &enable_lane_rx);
7159
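	/*
	 * Each set bit in an enable nibble represents one active lane, so a
	 * population count of the nibble yields the width (1-4 lanes).
	 */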
7160 /* convert to counts */
7161 tx = nibble_to_count(enable_lane_tx);
7162 rx = nibble_to_count(enable_lane_rx);
7163
7164 /*
7165 * Set link_speed_active here, overriding what was set in
7166 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7167 * set the max_rate field in handle_verify_cap until v0.19.
7168 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007169 if ((dd->icode == ICODE_RTL_SILICON) &&
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007170 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007171 /* max_rate: 0 = 12.5G, 1 = 25G */
7172 switch (max_rate) {
7173 case 0:
7174 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7175 break;
7176 default:
7177 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007178 "%s: unexpected max rate %d, using 25Gb\n",
7179 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007180 /* fall through */
7181 case 1:
7182 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7183 break;
7184 }
7185 }
7186
7187 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007188 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7189 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007190 *tx_width = link_width_to_bits(dd, tx);
7191 *rx_width = link_width_to_bits(dd, rx);
7192}
7193
7194/*
7195 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7196 * Valid after the end of VerifyCap and during LinkUp. Does not change
7197 * after link up. I.e. look elsewhere for downgrade information.
7198 *
7199 * Bits are:
7200 * + bits [7:4] contain the number of active transmitters
7201 * + bits [3:0] contain the number of active receivers
7202 * These are numbers 1 through 4 and can be different values if the
7203 * link is asymmetric.
7204 *
7205 * verify_cap_local_fm_link_width[0] retains its original value.
7206 */
7207static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7208 u16 *rx_width)
7209{
7210 u16 widths, tx, rx;
7211 u8 misc_bits, local_flags;
7212 u16 active_tx, active_rx;
7213
7214 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
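	/*
	 * The counts described above are in byte [1] (the upper byte) of the
	 * 16-bit widths value: active transmitters in bits [15:12], active
	 * receivers in bits [11:8].
	 */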
7215 tx = widths >> 12;
7216 rx = (widths >> 8) & 0xf;
7217
7218 *tx_width = link_width_to_bits(dd, tx);
7219 *rx_width = link_width_to_bits(dd, rx);
7220
7221 /* print the active widths */
7222 get_link_widths(dd, &active_tx, &active_rx);
7223}
7224
7225/*
7226 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7227 * hardware information when the link first comes up.
7228 *
7229 * The link width is not available until after VerifyCap.AllFramesReceived
7230 * (the trigger for handle_verify_cap), so this is outside that routine
7231 * and should be called when the 8051 signals linkup.
7232 */
7233void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7234{
7235 u16 tx_width, rx_width;
7236
7237 /* get end-of-LNI link widths */
7238 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7239
7240 /* use tx_width as the link is supposed to be symmetric on link up */
7241 ppd->link_width_active = tx_width;
7242 /* link width downgrade active (LWD.A) starts out matching LW.A */
7243 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7244 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7245 /* per OPA spec, on link up LWD.E resets to LWD.S */
7246 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7247 /* cache the active egress rate (units of 10^6 bits/sec) */
7248 ppd->current_egress_rate = active_egress_rate(ppd);
7249}
7250
7251/*
7252 * Handle a verify capabilities interrupt from the 8051.
7253 *
7254 * This is a work-queue function outside of the interrupt.
7255 */
7256void handle_verify_cap(struct work_struct *work)
7257{
7258 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7259 link_vc_work);
7260 struct hfi1_devdata *dd = ppd->dd;
7261 u64 reg;
7262 u8 power_management;
 7263 u8 continuous;
7264 u8 vcu;
7265 u8 vau;
7266 u8 z;
7267 u16 vl15buf;
7268 u16 link_widths;
7269 u16 crc_mask;
7270 u16 crc_val;
7271 u16 device_id;
7272 u16 active_tx, active_rx;
7273 u8 partner_supported_crc;
7274 u8 remote_tx_rate;
7275 u8 device_rev;
7276
7277 set_link_state(ppd, HLS_VERIFY_CAP);
7278
7279 lcb_shutdown(dd, 0);
7280 adjust_lcb_for_fpga_serdes(dd);
7281
7282 /*
7283 * These are now valid:
7284 * remote VerifyCap fields in the general LNI config
7285 * CSR DC8051_STS_REMOTE_GUID
7286 * CSR DC8051_STS_REMOTE_NODE_TYPE
7287 * CSR DC8051_STS_REMOTE_FM_SECURITY
7288 * CSR DC8051_STS_REMOTE_PORT_NO
7289 */
7290
 7291 read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007292 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7293 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007294 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7295 read_remote_device_id(dd, &device_id, &device_rev);
7296 /*
7297 * And the 'MgmtAllowed' information, which is exchanged during
 7298 * LNI, is also available at this point.
7299 */
7300 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7301 /* print the active widths */
7302 get_link_widths(dd, &active_tx, &active_rx);
7303 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007304 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7305 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007306 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007307 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7308 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7309 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007310 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007311 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007312 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007313 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007314 /*
7315 * The peer vAU value just read is the peer receiver value. HFI does
7316 * not support a transmit vAU of 0 (AU == 8). We advertised that
7317 * with Z=1 in the fabric capabilities sent to the peer. The peer
7318 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7319 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7320 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7321 * subject to the Z value exception.
7322 */
7323 if (vau == 0)
7324 vau = 1;
7325 set_up_vl15(dd, vau, vl15buf);
7326
7327 /* set up the LCB CRC mode */
7328 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7329
7330 /* order is important: use the lowest bit in common */
7331 if (crc_mask & CAP_CRC_14B)
7332 crc_val = LCB_CRC_14B;
7333 else if (crc_mask & CAP_CRC_48B)
7334 crc_val = LCB_CRC_48B;
7335 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7336 crc_val = LCB_CRC_12B_16B_PER_LANE;
7337 else
7338 crc_val = LCB_CRC_16B;
7339
7340 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7341 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7342 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7343
7344 /* set (14b only) or clear sideband credit */
7345 reg = read_csr(dd, SEND_CM_CTRL);
7346 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7347 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007348 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007349 } else {
7350 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007351 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007352 }
7353
7354 ppd->link_speed_active = 0; /* invalid value */
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007355 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007356 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7357 switch (remote_tx_rate) {
7358 case 0:
7359 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7360 break;
7361 case 1:
7362 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7363 break;
7364 }
7365 } else {
7366 /* actual rate is highest bit of the ANDed rates */
7367 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7368
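		/* in the ANDed rate: bit 1 (0x2) = 25G, bit 0 (0x1) = 12.5G */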
7369 if (rate & 2)
7370 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7371 else if (rate & 1)
7372 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7373 }
7374 if (ppd->link_speed_active == 0) {
7375 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007376 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007377 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7378 }
7379
7380 /*
7381 * Cache the values of the supported, enabled, and active
7382 * LTP CRC modes to return in 'portinfo' queries. But the bit
7383 * flags that are returned in the portinfo query differ from
7384 * what's in the link_crc_mask, crc_sizes, and crc_val
7385 * variables. Convert these here.
7386 */
7387 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7388 /* supported crc modes */
7389 ppd->port_ltp_crc_mode |=
7390 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7391 /* enabled crc modes */
7392 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7393 /* active crc mode */
7394
7395 /* set up the remote credit return table */
7396 assign_remote_cm_au_table(dd, vcu);
7397
7398 /*
7399 * The LCB is reset on entry to handle_verify_cap(), so this must
7400 * be applied on every link up.
7401 *
7402 * Adjust LCB error kill enable to kill the link if
7403 * these RBUF errors are seen:
7404 * REPLAY_BUF_MBE_SMASK
7405 * FLIT_INPUT_BUF_MBE_SMASK
7406 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007407 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007408 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7409 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7410 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7411 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7412 }
7413
7414 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7415 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7416
7417 /* give 8051 access to the LCB CSRs */
7418 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7419 set_8051_lcb_access(dd);
7420
7421 ppd->neighbor_guid =
7422 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7423 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7424 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7425 ppd->neighbor_type =
7426 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7427 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7428 ppd->neighbor_fm_security =
7429 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7430 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7431 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007432 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7433 ppd->neighbor_guid, ppd->neighbor_type,
7434 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007435 if (ppd->mgmt_allowed)
7436 add_full_mgmt_pkey(ppd);
7437
7438 /* tell the 8051 to go to LinkUp */
7439 set_link_state(ppd, HLS_GOING_UP);
7440}
7441
7442/*
7443 * Apply the link width downgrade enabled policy against the current active
7444 * link widths.
7445 *
7446 * Called when the enabled policy changes or the active link widths change.
7447 */
7448void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7449{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007450 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007451 int tries;
7452 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007453 u16 tx, rx;
7454
Dean Luick323fd782015-11-16 21:59:24 -05007455 /* use the hls lock to avoid a race with actual link up */
7456 tries = 0;
7457retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007458 mutex_lock(&ppd->hls_lock);
7459 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007460 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007461 /* still going up..wait and retry */
7462 if (ppd->host_link_state & HLS_GOING_UP) {
7463 if (++tries < 1000) {
7464 mutex_unlock(&ppd->hls_lock);
7465 usleep_range(100, 120); /* arbitrary */
7466 goto retry;
7467 }
7468 dd_dev_err(ppd->dd,
7469 "%s: giving up waiting for link state change\n",
7470 __func__);
7471 }
7472 goto done;
7473 }
7474
7475 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007476
7477 if (refresh_widths) {
7478 get_link_widths(ppd->dd, &tx, &rx);
7479 ppd->link_width_downgrade_tx_active = tx;
7480 ppd->link_width_downgrade_rx_active = rx;
7481 }
7482
Dean Luickf9b56352016-04-14 08:31:30 -07007483 if (ppd->link_width_downgrade_tx_active == 0 ||
7484 ppd->link_width_downgrade_rx_active == 0) {
7485 /* the 8051 reported a dead link as a downgrade */
7486 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7487 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007488 /* downgrade is disabled */
7489
7490 /* bounce if not at starting active width */
7491 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007492 ppd->link_width_downgrade_tx_active) ||
7493 (ppd->link_width_active !=
7494 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007495 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007496 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007497 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007498 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7499 ppd->link_width_active,
7500 ppd->link_width_downgrade_tx_active,
7501 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007502 do_bounce = 1;
7503 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007504 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7505 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007506 /* Tx or Rx is outside the enabled policy */
7507 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007508 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007509 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007510 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7511 lwde, ppd->link_width_downgrade_tx_active,
7512 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007513 do_bounce = 1;
7514 }
7515
Dean Luick323fd782015-11-16 21:59:24 -05007516done:
7517 mutex_unlock(&ppd->hls_lock);
7518
Mike Marciniszyn77241052015-07-30 15:17:43 -04007519 if (do_bounce) {
7520 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007521 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007522 set_link_state(ppd, HLS_DN_OFFLINE);
7523 start_link(ppd);
7524 }
7525}
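
/*
 * Illustrative sketch only (not driver code; the helper name is
 * hypothetical): the bounce decision above reduces to a predicate on the
 * enabled downgrade mask and the active link widths.  The dead-link case
 * (either downgrade-active width reading 0) is handled separately above
 * and never bounces.
 */
static inline int widths_need_bounce(u16 lwde, u16 active,
				     u16 tx_active, u16 rx_active)
{
	if (lwde == 0)
		/*
		 * downgrade disabled: any change from the original
		 * active width forces a bounce
		 */
		return active != tx_active || active != rx_active;
	/* downgrade enabled: both tx and rx must stay inside the policy */
	return (lwde & tx_active) == 0 || (lwde & rx_active) == 0;
}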
7526
7527/*
7528 * Handle a link downgrade interrupt from the 8051.
7529 *
7530 * This is a work-queue function outside of the interrupt.
7531 */
7532void handle_link_downgrade(struct work_struct *work)
7533{
7534 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7535 link_downgrade_work);
7536
7537 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7538 apply_link_downgrade_policy(ppd, 1);
7539}
7540
7541static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7542{
7543 return flag_string(buf, buf_len, flags, dcc_err_flags,
7544 ARRAY_SIZE(dcc_err_flags));
7545}
7546
7547static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7548{
7549 return flag_string(buf, buf_len, flags, lcb_err_flags,
7550 ARRAY_SIZE(lcb_err_flags));
7551}
7552
7553static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7554{
7555 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7556 ARRAY_SIZE(dc8051_err_flags));
7557}
7558
7559static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7560{
7561 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7562 ARRAY_SIZE(dc8051_info_err_flags));
7563}
7564
7565static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7566{
7567 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7568 ARRAY_SIZE(dc8051_info_host_msg_flags));
7569}
7570
7571static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7572{
7573 struct hfi1_pportdata *ppd = dd->pport;
7574 u64 info, err, host_msg;
7575 int queue_link_down = 0;
7576 char buf[96];
7577
7578 /* look at the flags */
7579 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7580 /* 8051 information set by firmware */
7581 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7582 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7583 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7584 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7585 host_msg = (info >>
7586 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7587 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7588
7589 /*
7590 * Handle error flags.
7591 */
7592 if (err & FAILED_LNI) {
7593 /*
7594 * LNI error indications are cleared by the 8051
7595 * only when starting polling. Only pay attention
7596 * to them when in the states that occur during
7597 * LNI.
7598 */
7599 if (ppd->host_link_state
7600 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7601 queue_link_down = 1;
7602 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007603 dc8051_info_err_string(buf,
7604 sizeof(buf),
7605 err &
7606 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007607 }
7608 err &= ~(u64)FAILED_LNI;
7609 }
Dean Luick6d014532015-12-01 15:38:23 -05007610 /* unknown frames can happen during LNI, just count */
7611 if (err & UNKNOWN_FRAME) {
7612 ppd->unknown_frame_count++;
7613 err &= ~(u64)UNKNOWN_FRAME;
7614 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007615 if (err) {
7616 /* report remaining errors, but do not do anything */
7617 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007618 dc8051_info_err_string(buf, sizeof(buf),
7619 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007620 }
7621
7622 /*
7623 * Handle host message flags.
7624 */
7625 if (host_msg & HOST_REQ_DONE) {
7626 /*
7627 * Presently, the driver does a busy wait for
7628 * host requests to complete. This is only an
7629 * informational message.
7630 * NOTE: The 8051 clears the host message
7631 * information *on the next 8051 command*.
7632 * Therefore, when linkup is achieved,
7633 * this flag will still be set.
7634 */
7635 host_msg &= ~(u64)HOST_REQ_DONE;
7636 }
7637 if (host_msg & BC_SMA_MSG) {
7638 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7639 host_msg &= ~(u64)BC_SMA_MSG;
7640 }
7641 if (host_msg & LINKUP_ACHIEVED) {
7642 dd_dev_info(dd, "8051: Link up\n");
7643 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7644 host_msg &= ~(u64)LINKUP_ACHIEVED;
7645 }
7646 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007647 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007648 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7649 }
7650 if (host_msg & VERIFY_CAP_FRAME) {
7651 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7652 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7653 }
7654 if (host_msg & LINK_GOING_DOWN) {
7655 const char *extra = "";
7656 /* no downgrade action needed if going down */
7657 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7658 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7659 extra = " (ignoring downgrade)";
7660 }
7661 dd_dev_info(dd, "8051: Link down%s\n", extra);
7662 queue_link_down = 1;
7663 host_msg &= ~(u64)LINK_GOING_DOWN;
7664 }
7665 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7666 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7667 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7668 }
7669 if (host_msg) {
7670 /* report remaining messages, but do not do anything */
7671 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007672 dc8051_info_host_msg_string(buf,
7673 sizeof(buf),
7674 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007675 }
7676
7677 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7678 }
7679 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7680 /*
7681 * Lost the 8051 heartbeat. If this happens, we
7682 * receive constant interrupts about it. Disable
7683 * the interrupt after the first.
7684 */
7685 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7686 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007687 read_csr(dd, DC_DC8051_ERR_EN) &
7688 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007689
7690 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7691 }
7692 if (reg) {
7693 /* report the error, but do not do anything */
7694 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007695 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007696 }
7697
7698 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007699 /*
7700 * if the link is already going down or disabled, do not
7701 * queue another
7702 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007703 if ((ppd->host_link_state &
7704 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7705 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007706 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007707 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007708 } else {
7709 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7710 }
7711 }
7712}
7713
7714static const char * const fm_config_txt[] = {
7715[0] =
7716 "BadHeadDist: Distance violation between two head flits",
7717[1] =
7718 "BadTailDist: Distance violation between two tail flits",
7719[2] =
7720 "BadCtrlDist: Distance violation between two credit control flits",
7721[3] =
7722 "BadCrdAck: Credits return for unsupported VL",
7723[4] =
7724 "UnsupportedVLMarker: Received VL Marker",
7725[5] =
7726 "BadPreempt: Exceeded the preemption nesting level",
7727[6] =
7728 "BadControlFlit: Received unsupported control flit",
7729/* no 7 */
7730[8] =
7731 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7732};
7733
7734static const char * const port_rcv_txt[] = {
7735[1] =
7736 "BadPktLen: Illegal PktLen",
7737[2] =
7738 "PktLenTooLong: Packet longer than PktLen",
7739[3] =
7740 "PktLenTooShort: Packet shorter than PktLen",
7741[4] =
7742 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7743[5] =
7744 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7745[6] =
7746 "BadL2: Illegal L2 opcode",
7747[7] =
7748 "BadSC: Unsupported SC",
7749[9] =
7750 "BadRC: Illegal RC",
7751[11] =
7752 "PreemptError: Preempting with same VL",
7753[12] =
7754 "PreemptVL15: Preempting a VL15 packet",
7755};
7756
7757#define OPA_LDR_FMCONFIG_OFFSET 16
7758#define OPA_LDR_PORTRCV_OFFSET 0
7759static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7760{
7761 u64 info, hdr0, hdr1;
7762 const char *extra;
7763 char buf[96];
7764 struct hfi1_pportdata *ppd = dd->pport;
7765 u8 lcl_reason = 0;
7766 int do_bounce = 0;
7767
7768 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7769 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7770 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7771 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7772 /* set status bit */
7773 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7774 }
7775 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7776 }
7777
7778 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7779 struct hfi1_pportdata *ppd = dd->pport;
7780 /* this counter saturates at (2^32) - 1 */
7781 if (ppd->link_downed < (u32)UINT_MAX)
7782 ppd->link_downed++;
7783 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7784 }
7785
7786 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7787 u8 reason_valid = 1;
7788
7789 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7790 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7791 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7792 /* set status bit */
7793 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7794 }
7795 switch (info) {
7796 case 0:
7797 case 1:
7798 case 2:
7799 case 3:
7800 case 4:
7801 case 5:
7802 case 6:
7803 extra = fm_config_txt[info];
7804 break;
7805 case 8:
7806 extra = fm_config_txt[info];
7807 if (ppd->port_error_action &
7808 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7809 do_bounce = 1;
7810 /*
7811 * lcl_reason cannot be derived from info
7812 * for this error
7813 */
7814 lcl_reason =
7815 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7816 }
7817 break;
7818 default:
7819 reason_valid = 0;
7820 snprintf(buf, sizeof(buf), "reserved%lld", info);
7821 extra = buf;
7822 break;
7823 }
7824
7825 if (reason_valid && !do_bounce) {
7826 do_bounce = ppd->port_error_action &
7827 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7828 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7829 }
7830
7831 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007832 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7833 extra);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007834 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7835 }
7836
7837 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7838 u8 reason_valid = 1;
7839
7840 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7841 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7842 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7843 if (!(dd->err_info_rcvport.status_and_code &
7844 OPA_EI_STATUS_SMASK)) {
7845 dd->err_info_rcvport.status_and_code =
7846 info & OPA_EI_CODE_SMASK;
7847 /* set status bit */
7848 dd->err_info_rcvport.status_and_code |=
7849 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007850 /*
7851 * save first 2 flits in the packet that caused
7852 * the error
7853 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007854 dd->err_info_rcvport.packet_flit1 = hdr0;
7855 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007856 }
7857 switch (info) {
7858 case 1:
7859 case 2:
7860 case 3:
7861 case 4:
7862 case 5:
7863 case 6:
7864 case 7:
7865 case 9:
7866 case 11:
7867 case 12:
7868 extra = port_rcv_txt[info];
7869 break;
7870 default:
7871 reason_valid = 0;
7872 snprintf(buf, sizeof(buf), "reserved%lld", info);
7873 extra = buf;
7874 break;
7875 }
7876
7877 if (reason_valid && !do_bounce) {
7878 do_bounce = ppd->port_error_action &
7879 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7880 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7881 }
7882
7883 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007884 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7885 " hdr0 0x%llx, hdr1 0x%llx\n",
7886 extra, hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007887
7888 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7889 }
7890
7891 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7892 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007893 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007894 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7895 }
7896 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7897 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007898 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007899 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7900 }
7901
Don Hiatt243d9f42017-03-20 17:26:20 -07007902 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
7903 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
7904
Mike Marciniszyn77241052015-07-30 15:17:43 -04007905 /* report any remaining errors */
7906 if (reg)
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007907 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
7908 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007909
7910 if (lcl_reason == 0)
7911 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7912
7913 if (do_bounce) {
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007914 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
7915 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007916 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7917 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7918 }
7919}
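
/*
 * Illustrative sketch (not driver code; the helper name is hypothetical):
 * PortErrorAction is used above as a bit-per-error-code mask.  FMConfig
 * error codes occupy bits starting at OPA_LDR_FMCONFIG_OFFSET (16) and
 * PortRcv error codes start at OPA_LDR_PORTRCV_OFFSET (0), so each valid
 * info code selects one policy bit that decides whether the link bounces.
 */
static inline int fmconfig_error_bounces(u32 port_error_action,
					 unsigned int info)
{
	return !!(port_error_action & (1 << (OPA_LDR_FMCONFIG_OFFSET + info)));
}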
7920
7921static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7922{
7923 char buf[96];
7924
7925 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007926 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007927}
7928
7929/*
7930 * CCE block DC interrupt. Source is < 8.
7931 */
7932static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7933{
7934 const struct err_reg_info *eri = &dc_errs[source];
7935
7936 if (eri->handler) {
7937 interrupt_clear_down(dd, 0, eri);
7938 } else if (source == 3 /* dc_lbm_int */) {
7939 /*
7940 * This indicates that a parity error has occurred on the
7941 * address/control lines presented to the LBM. The error
7942 * is a single pulse, there is no associated error flag,
7943 * and it is non-maskable. This is because if a parity
7944 * error occurs on the request the request is dropped.
7945 * This should never occur, but it is nice to know if it
7946 * ever does.
7947 */
7948 dd_dev_err(dd, "Parity error in DC LBM block\n");
7949 } else {
7950 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7951 }
7952}
7953
7954/*
7955 * TX block send credit interrupt. Source is < 160.
7956 */
7957static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7958{
7959 sc_group_release_update(dd, source);
7960}
7961
7962/*
7963 * TX block SDMA interrupt. Source is < 48.
7964 *
7965 * SDMA interrupts are grouped by type:
7966 *
7967 * 0 - N-1 = SDma
7968 * N - 2N-1 = SDmaProgress
7969 * 2N - 3N-1 = SDmaIdle
7970 */
7971static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7972{
7973 /* what interrupt */
7974 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7975 /* which engine */
7976 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7977
7978#ifdef CONFIG_SDMA_VERBOSITY
7979 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7980 slashstrip(__FILE__), __LINE__, __func__);
7981 sdma_dumpstate(&dd->per_sdma[which]);
7982#endif
7983
7984 if (likely(what < 3 && which < dd->num_sdma)) {
7985 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7986 } else {
7987 /* should not happen */
7988 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7989 }
7990}
7991
7992/*
7993 * RX block receive available interrupt. Source is < 160.
7994 */
7995static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7996{
7997 struct hfi1_ctxtdata *rcd;
7998 char *err_detail;
7999
8000 if (likely(source < dd->num_rcv_contexts)) {
8001 rcd = dd->rcd[source];
8002 if (rcd) {
8003 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008004 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008005 else
8006 handle_user_interrupt(rcd);
8007 return; /* OK */
8008 }
8009 /* received an interrupt, but no rcd */
8010 err_detail = "dataless";
8011 } else {
8012 /* received an interrupt, but are not using that context */
8013 err_detail = "out of range";
8014 }
8015 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008016 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008017}
8018
8019/*
8020 * RX block receive urgent interrupt. Source is < 160.
8021 */
8022static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8023{
8024 struct hfi1_ctxtdata *rcd;
8025 char *err_detail;
8026
8027 if (likely(source < dd->num_rcv_contexts)) {
8028 rcd = dd->rcd[source];
8029 if (rcd) {
8030 /* only pay attention to user urgent interrupts */
8031 if (source >= dd->first_user_ctxt)
8032 handle_user_interrupt(rcd);
8033 return; /* OK */
8034 }
8035 /* received an interrupt, but no rcd */
8036 err_detail = "dataless";
8037 } else {
8038 /* received an interrupt, but are not using that context */
8039 err_detail = "out of range";
8040 }
8041 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008042 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008043}
8044
8045/*
8046 * Reserved range interrupt. Should not be called in normal operation.
8047 */
8048static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8049{
8050 char name[64];
8051
8052 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008053 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008054}
8055
8056static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008057/*
8058 * start end
8059 * name func interrupt func
8060 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008061{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8062 is_misc_err_name, is_misc_err_int },
8063{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8064 is_sdma_eng_err_name, is_sdma_eng_err_int },
8065{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8066 is_sendctxt_err_name, is_sendctxt_err_int },
8067{ IS_SDMA_START, IS_SDMA_END,
8068 is_sdma_eng_name, is_sdma_eng_int },
8069{ IS_VARIOUS_START, IS_VARIOUS_END,
8070 is_various_name, is_various_int },
8071{ IS_DC_START, IS_DC_END,
8072 is_dc_name, is_dc_int },
8073{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8074 is_rcv_avail_name, is_rcv_avail_int },
8075{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8076 is_rcv_urgent_name, is_rcv_urgent_int },
8077{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8078 is_send_credit_name, is_send_credit_int},
8079{ IS_RESERVED_START, IS_RESERVED_END,
8080 is_reserved_name, is_reserved_int},
8081};
8082
8083/*
8084 * Interrupt source interrupt - called when the given source has an interrupt.
8085 * Source is a bit index into an array of 64-bit integers.
8086 */
8087static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8088{
8089 const struct is_table *entry;
8090
8091 /* avoids a double compare by walking the table in-order */
8092 for (entry = &is_table[0]; entry->is_name; entry++) {
8093 if (source < entry->end) {
8094 trace_hfi1_interrupt(dd, entry, source);
8095 entry->is_int(dd, source - entry->start);
8096 return;
8097 }
8098 }
8099 /* fell off the end */
8100 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8101}
8102
8103/*
8104 * General interrupt handler. This can correctly handle
8105 * all interrupts when INTx is used.
8106 */
8107static irqreturn_t general_interrupt(int irq, void *data)
8108{
8109 struct hfi1_devdata *dd = data;
8110 u64 regs[CCE_NUM_INT_CSRS];
8111 u32 bit;
8112 int i;
8113
8114 this_cpu_inc(*dd->int_counter);
8115
8116 /* phase 1: scan and clear all handled interrupts */
8117 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8118 if (dd->gi_mask[i] == 0) {
8119 regs[i] = 0; /* used later */
8120 continue;
8121 }
8122 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8123 dd->gi_mask[i];
8124 /* only clear if anything is set */
8125 if (regs[i])
8126 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8127 }
8128
8129 /* phase 2: call the appropriate handler */
8130 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008131 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008132 is_interrupt(dd, bit);
8133 }
8134
8135 return IRQ_HANDLED;
8136}
8137
8138static irqreturn_t sdma_interrupt(int irq, void *data)
8139{
8140 struct sdma_engine *sde = data;
8141 struct hfi1_devdata *dd = sde->dd;
8142 u64 status;
8143
8144#ifdef CONFIG_SDMA_VERBOSITY
8145 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8146 slashstrip(__FILE__), __LINE__, __func__);
8147 sdma_dumpstate(sde);
8148#endif
8149
8150 this_cpu_inc(*dd->int_counter);
8151
8152 /* This read_csr is really bad in the hot path */
8153 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008154 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8155 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008156 if (likely(status)) {
8157 /* clear the interrupt(s) */
8158 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008159 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8160 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008161
8162 /* handle the interrupt(s) */
8163 sdma_engine_interrupt(sde, status);
8164 } else
8165 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008166 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008167
8168 return IRQ_HANDLED;
8169}
8170
8171/*
Dean Luickecd42f82016-02-03 14:35:14 -08008172 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8173 * to ensure that the write completed. This does NOT guarantee that
8174 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008175 */
8176static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8177{
8178 struct hfi1_devdata *dd = rcd->dd;
8179 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8180
8181 mmiowb(); /* make sure everything before is written */
8182 write_csr(dd, addr, rcd->imask);
8183 /* force the above write on the chip and get a value back */
8184 (void)read_csr(dd, addr);
8185}
8186
8187/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008188void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008189{
8190 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8191}
8192
Dean Luickecd42f82016-02-03 14:35:14 -08008193/*
8194 * Return non-zero if a packet is present.
8195 *
8196 * This routine is called when rechecking for packets after the RcvAvail
8197 * interrupt has been cleared down. First, do a quick check of memory for
8198 * a packet present. If not found, use an expensive CSR read of the context
8199 * tail to determine the actual tail. The CSR read is necessary because there
8200 * is no method to push pending DMAs to memory other than an interrupt and we
8201 * are trying to determine if we need to force an interrupt.
8202 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008203static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8204{
Dean Luickecd42f82016-02-03 14:35:14 -08008205 u32 tail;
8206 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008207
Dean Luickecd42f82016-02-03 14:35:14 -08008208 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8209 present = (rcd->seq_cnt ==
8210 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8211 else /* is RDMA rtail */
8212 present = (rcd->head != get_rcvhdrtail(rcd));
8213
8214 if (present)
8215 return 1;
8216
8217 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8218 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8219 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008220}
8221
8222/*
8223 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8224 * This routine will try to handle packets immediately (latency), but if
8225 * it finds too many, it will invoke the thread handler (bandwidth). The
Jubin John16733b82016-02-14 20:20:58 -08008226 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008227 * invoked) is finished. The intent is to avoid extra interrupts while we
8228 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008229 */
8230static irqreturn_t receive_context_interrupt(int irq, void *data)
8231{
8232 struct hfi1_ctxtdata *rcd = data;
8233 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008234 int disposition;
8235 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008236
8237 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8238 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008239 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008240
Dean Luickf4f30031c2015-10-26 10:28:44 -04008241 /* receive interrupt remains blocked while processing packets */
8242 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008243
Dean Luickf4f30031c2015-10-26 10:28:44 -04008244 /*
8245 * Too many packets were seen while processing packets in this
8246 * IRQ handler. Invoke the handler thread. The receive interrupt
8247 * remains blocked.
8248 */
8249 if (disposition == RCV_PKT_LIMIT)
8250 return IRQ_WAKE_THREAD;
8251
8252 /*
8253 * The packet processor detected no more packets. Clear the receive
8254 * interrupt and recheck for a packet that may have arrived
8255 * after the previous check and interrupt clear. If a packet arrived,
8256 * force another interrupt.
8257 */
8258 clear_recv_intr(rcd);
8259 present = check_packet_present(rcd);
8260 if (present)
8261 force_recv_intr(rcd);
8262
8263 return IRQ_HANDLED;
8264}
8265
8266/*
8267 * Receive packet thread handler. This expects to be invoked with the
8268 * receive interrupt still blocked.
8269 */
8270static irqreturn_t receive_context_thread(int irq, void *data)
8271{
8272 struct hfi1_ctxtdata *rcd = data;
8273 int present;
8274
8275 /* receive interrupt is still blocked from the IRQ handler */
8276 (void)rcd->do_interrupt(rcd, 1);
8277
8278 /*
8279 * The packet processor will only return if it detected no more
8280 * packets. Hold IRQs here so we can safely clear the interrupt and
8281 * recheck for a packet that may have arrived after the previous
8282 * check and the interrupt clear. If a packet arrived, force another
8283 * interrupt.
8284 */
8285 local_irq_disable();
8286 clear_recv_intr(rcd);
8287 present = check_packet_present(rcd);
8288 if (present)
8289 force_recv_intr(rcd);
8290 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008291
8292 return IRQ_HANDLED;
8293}
8294
8295/* ========================================================================= */
8296
8297u32 read_physical_state(struct hfi1_devdata *dd)
8298{
8299 u64 reg;
8300
8301 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8302 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8303 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8304}
8305
Jim Snowfb9036d2016-01-11 18:32:21 -05008306u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008307{
8308 u64 reg;
8309
8310 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8311 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8312 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8313}
8314
8315static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8316{
8317 u64 reg;
8318
8319 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8320 /* clear current state, set new state */
8321 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8322 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8323 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8324}
8325
8326/*
8327 * Use the 8051 to read a LCB CSR.
8328 */
8329static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8330{
8331 u32 regno;
8332 int ret;
8333
8334 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8335 if (acquire_lcb_access(dd, 0) == 0) {
8336 *data = read_csr(dd, addr);
8337 release_lcb_access(dd, 0);
8338 return 0;
8339 }
8340 return -EBUSY;
8341 }
8342
8343 /* register is an index of LCB registers: (offset - base) / 8 */
8344 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8345 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8346 if (ret != HCMD_SUCCESS)
8347 return -EBUSY;
8348 return 0;
8349}
8350
8351/*
Michael J. Ruhl86884262017-03-20 17:24:51 -07008352 * Provide a cache for some of the LCB registers in case the LCB is
8353 * unavailable.
8354 * (The LCB is unavailable in certain link states, for example.)
8355 */
8356struct lcb_datum {
8357 u32 off;
8358 u64 val;
8359};
8360
8361static struct lcb_datum lcb_cache[] = {
8362 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8363 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8364 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8365};
8366
8367static void update_lcb_cache(struct hfi1_devdata *dd)
8368{
8369 int i;
8370 int ret;
8371 u64 val;
8372
8373 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8374 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8375
8376 /* Update if we get good data */
8377 if (likely(ret != -EBUSY))
8378 lcb_cache[i].val = val;
8379 }
8380}
8381
8382static int read_lcb_cache(u32 off, u64 *val)
8383{
8384 int i;
8385
8386 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8387 if (lcb_cache[i].off == off) {
8388 *val = lcb_cache[i].val;
8389 return 0;
8390 }
8391 }
8392
8393 pr_warn("%s bad offset 0x%x\n", __func__, off);
8394 return -1;
8395}
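
/*
 * Illustrative usage sketch (not driver code; the helper name is
 * hypothetical): refresh the cache while the LCB is still reachable, then
 * fall back to the cached value once host access has been lost.
 */
static u64 cached_rx_replay_count(struct hfi1_devdata *dd)
{
	u64 val = 0;

	update_lcb_cache(dd);		/* while the LCB can still be read */
	read_lcb_cache(DC_LCB_ERR_INFO_RX_REPLAY_CNT, &val);
	return val;
}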
8396
8397/*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008398 * Read an LCB CSR. Access may not be in host control, so check.
8399 * Return 0 on success, -EBUSY on failure.
8400 */
8401int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8402{
8403 struct hfi1_pportdata *ppd = dd->pport;
8404
8405 /* if up, go through the 8051 for the value */
8406 if (ppd->host_link_state & HLS_UP)
8407 return read_lcb_via_8051(dd, addr, data);
Michael J. Ruhl86884262017-03-20 17:24:51 -07008408 /* if going up or down, check the cache, otherwise, no access */
8409 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8410 if (read_lcb_cache(addr, data))
8411 return -EBUSY;
8412 return 0;
8413 }
8414
Mike Marciniszyn77241052015-07-30 15:17:43 -04008415 /* otherwise, host has access */
8416 *data = read_csr(dd, addr);
8417 return 0;
8418}
8419
8420/*
8421 * Use the 8051 to write a LCB CSR.
8422 */
8423static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8424{
Dean Luick3bf40d62015-11-06 20:07:04 -05008425 u32 regno;
8426 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008427
Dean Luick3bf40d62015-11-06 20:07:04 -05008428 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008429 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
Dean Luick3bf40d62015-11-06 20:07:04 -05008430 if (acquire_lcb_access(dd, 0) == 0) {
8431 write_csr(dd, addr, data);
8432 release_lcb_access(dd, 0);
8433 return 0;
8434 }
8435 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008436 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008437
8438 /* register is an index of LCB registers: (offset - base) / 8 */
8439 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8440 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8441 if (ret != HCMD_SUCCESS)
8442 return -EBUSY;
8443 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008444}
8445
8446/*
8447 * Write an LCB CSR. Access may not be in host control, so check.
8448 * Return 0 on success, -EBUSY on failure.
8449 */
8450int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8451{
8452 struct hfi1_pportdata *ppd = dd->pport;
8453
8454 /* if up, go through the 8051 for the value */
8455 if (ppd->host_link_state & HLS_UP)
8456 return write_lcb_via_8051(dd, addr, data);
8457 /* if going up or down, no access */
8458 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8459 return -EBUSY;
8460 /* otherwise, host has access */
8461 write_csr(dd, addr, data);
8462 return 0;
8463}
8464
8465/*
8466 * Returns:
8467 * < 0 = Linux error, not able to get access
8468 * > 0 = 8051 command RETURN_CODE
8469 */
8470static int do_8051_command(
8471 struct hfi1_devdata *dd,
8472 u32 type,
8473 u64 in_data,
8474 u64 *out_data)
8475{
8476 u64 reg, completed;
8477 int return_code;
8478 unsigned long flags;
8479 unsigned long timeout;
8480
8481 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8482
8483 /*
8484 * Alternative to holding the lock for a long time:
8485 * - keep busy wait - have other users bounce off
8486 */
8487 spin_lock_irqsave(&dd->dc8051_lock, flags);
8488
8489 /* We can't send any commands to the 8051 if it's in reset */
8490 if (dd->dc_shutdown) {
8491 return_code = -ENODEV;
8492 goto fail;
8493 }
8494
8495 /*
8496 * If an 8051 host command timed out previously, then the 8051 is
8497 * stuck.
8498 *
8499 * On first timeout, attempt to reset and restart the entire DC
8500 * block (including 8051). (Is this too big of a hammer?)
8501 *
8502 * If the 8051 times out a second time, the reset did not bring it
8503 * back to healthy life. In that case, fail any subsequent commands.
8504 */
8505 if (dd->dc8051_timed_out) {
8506 if (dd->dc8051_timed_out > 1) {
8507 dd_dev_err(dd,
8508 "Previous 8051 host command timed out, skipping command %u\n",
8509 type);
8510 return_code = -ENXIO;
8511 goto fail;
8512 }
8513 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8514 dc_shutdown(dd);
8515 dc_start(dd);
8516 spin_lock_irqsave(&dd->dc8051_lock, flags);
8517 }
8518
8519 /*
8520 * If there is no timeout, then the 8051 command interface is
8521 * waiting for a command.
8522 */
8523
8524 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008525 * When writing an LCB CSR, out_data contains the full value to
8526 * be written, while in_data contains the relative LCB
8527 * address in 7:0. Do the work here, rather than in the caller,
8528 * of distributing the write data to where it needs to go:
8529 *
8530 * Write data
8531 * 39:00 -> in_data[47:8]
8532 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8533 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8534 */
8535 if (type == HCMD_WRITE_LCB_CSR) {
8536 in_data |= ((*out_data) & 0xffffffffffull) << 8;
Dean Luick00801672016-12-07 19:33:40 -08008537 /* must preserve COMPLETED - it is tied to hardware */
8538 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8539 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8540 reg |= ((((*out_data) >> 40) & 0xff) <<
Dean Luick3bf40d62015-11-06 20:07:04 -05008541 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8542 | ((((*out_data) >> 48) & 0xffff) <<
8543 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8544 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8545 }
8546
8547 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008548 * Do two writes: the first to stabilize the type and req_data, the
8549 * second to activate.
8550 */
8551 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8552 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8553 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8554 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8555 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8556 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8557 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8558
8559 /* wait for completion, alternate: interrupt */
8560 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8561 while (1) {
8562 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8563 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8564 if (completed)
8565 break;
8566 if (time_after(jiffies, timeout)) {
8567 dd->dc8051_timed_out++;
8568 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8569 if (out_data)
8570 *out_data = 0;
8571 return_code = -ETIMEDOUT;
8572 goto fail;
8573 }
8574 udelay(2);
8575 }
8576
8577 if (out_data) {
8578 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8579 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8580 if (type == HCMD_READ_LCB_CSR) {
8581 /* top 16 bits are in a different register */
8582 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8583 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8584 << (48
8585 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8586 }
8587 }
8588 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8589 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8590 dd->dc8051_timed_out = 0;
8591 /*
8592 * Clear command for next user.
8593 */
8594 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8595
8596fail:
8597 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8598
8599 return return_code;
8600}
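
/*
 * Illustrative sketch (not driver code; the helper name is hypothetical):
 * packing of an LCB CSR write as performed inside do_8051_command() for
 * HCMD_WRITE_LCB_CSR.  Bits 39:0 of the write value travel in
 * in_data[47:8] together with the relative LCB register index in
 * in_data[7:0]; the remaining 24 bits are staged in DC8051_CFG_EXT_DEV_0
 * before the command is issued.
 */
static inline u64 lcb_write_in_data(u32 regno, u64 write_val)
{
	/* regno is the relative LCB register index, as in write_lcb_via_8051() */
	return (u64)(regno & 0xff) | ((write_val & 0xffffffffffull) << 8);
}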
8601
8602static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8603{
8604 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8605}
8606
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008607int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8608 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008609{
8610 u64 data;
8611 int ret;
8612
8613 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8614 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8615 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8616 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8617 if (ret != HCMD_SUCCESS) {
8618 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008619 "load 8051 config: field id %d, lane %d, err %d\n",
8620 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008621 }
8622 return ret;
8623}
8624
8625/*
8626 * Read the 8051 firmware "registers". Use the RAM directly. Always
8627 * set the result, even on error.
8628 * Return 0 on success, -errno on failure
8629 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008630int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8631 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008632{
8633 u64 big_data;
8634 u32 addr;
8635 int ret;
8636
8637 /* address start depends on the lane_id */
8638 if (lane_id < 4)
8639 addr = (4 * NUM_GENERAL_FIELDS)
8640 + (lane_id * 4 * NUM_LANE_FIELDS);
8641 else
8642 addr = 0;
8643 addr += field_id * 4;
8644
8645 /* read is in 8-byte chunks, hardware will truncate the address down */
8646 ret = read_8051_data(dd, addr, 8, &big_data);
8647
8648 if (ret == 0) {
8649 /* extract the 4 bytes we want */
8650 if (addr & 0x4)
8651 *result = (u32)(big_data >> 32);
8652 else
8653 *result = (u32)big_data;
8654 } else {
8655 *result = 0;
8656 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008657 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008658 }
8659
8660 return ret;
8661}
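
/*
 * Illustrative sketch (not driver code; the helper name is hypothetical):
 * the 8051 RAM address computed by read_8051_config() above.  Per-lane
 * fields follow the general fields, each field is 4 bytes wide, and since
 * reads are done in 8-byte chunks, bit 2 of the address selects which
 * 32-bit half of the returned data holds the field.
 */
static inline u32 config_field_addr(u8 field_id, u8 lane_id)
{
	u32 addr = 0;

	if (lane_id < 4)	/* lane fields follow the general block */
		addr = (4 * NUM_GENERAL_FIELDS) +
		       (lane_id * 4 * NUM_LANE_FIELDS);
	return addr + field_id * 4;
}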
8662
8663static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8664 u8 continuous)
8665{
8666 u32 frame;
8667
8668 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8669 | power_management << POWER_MANAGEMENT_SHIFT;
8670 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8671 GENERAL_CONFIG, frame);
8672}
8673
8674static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8675 u16 vl15buf, u8 crc_sizes)
8676{
8677 u32 frame;
8678
8679 frame = (u32)vau << VAU_SHIFT
8680 | (u32)z << Z_SHIFT
8681 | (u32)vcu << VCU_SHIFT
8682 | (u32)vl15buf << VL15BUF_SHIFT
8683 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8684 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8685 GENERAL_CONFIG, frame);
8686}
8687
8688static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8689 u8 *flag_bits, u16 *link_widths)
8690{
8691 u32 frame;
8692
8693 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008694 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008695 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8696 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8697 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8698}
8699
8700static int write_vc_local_link_width(struct hfi1_devdata *dd,
8701 u8 misc_bits,
8702 u8 flag_bits,
8703 u16 link_widths)
8704{
8705 u32 frame;
8706
8707 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8708 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8709 | (u32)link_widths << LINK_WIDTH_SHIFT;
8710 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8711 frame);
8712}
8713
8714static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8715 u8 device_rev)
8716{
8717 u32 frame;
8718
8719 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8720 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8721 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8722}
8723
8724static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8725 u8 *device_rev)
8726{
8727 u32 frame;
8728
8729 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8730 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8731 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8732 & REMOTE_DEVICE_REV_MASK;
8733}
8734
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008735void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8736 u8 *ver_patch)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008737{
8738 u32 frame;
8739
8740 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008741 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8742 STS_FM_VERSION_MAJOR_MASK;
8743 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8744 STS_FM_VERSION_MINOR_MASK;
8745
8746 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8747 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8748 STS_FM_VERSION_PATCH_MASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008749}
8750
8751static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8752 u8 *continuous)
8753{
8754 u32 frame;
8755
8756 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8757 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8758 & POWER_MANAGEMENT_MASK;
8759 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8760 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8761}
8762
8763static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8764 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8765{
8766 u32 frame;
8767
8768 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8769 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8770 *z = (frame >> Z_SHIFT) & Z_MASK;
8771 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8772 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8773 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8774}
8775
8776static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8777 u8 *remote_tx_rate,
8778 u16 *link_widths)
8779{
8780 u32 frame;
8781
8782 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008783 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008784 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8785 & REMOTE_TX_RATE_MASK;
8786 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8787}
8788
8789static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8790{
8791 u32 frame;
8792
8793 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8794 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8795}
8796
8797static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8798{
8799 u32 frame;
8800
8801 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8802 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8803}
8804
8805static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8806{
8807 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8808}
8809
8810static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8811{
8812 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8813}
8814
8815void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8816{
8817 u32 frame;
8818 int ret;
8819
8820 *link_quality = 0;
8821 if (dd->pport->host_link_state & HLS_UP) {
8822 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008823 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008824 if (ret == 0)
8825 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8826 & LINK_QUALITY_MASK;
8827 }
8828}
8829
8830static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8831{
8832 u32 frame;
8833
8834 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8835 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8836}
8837
Dean Luickfeb831d2016-04-14 08:31:36 -07008838static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8839{
8840 u32 frame;
8841
8842 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8843 *ldr = (frame & 0xff);
8844}
8845
Mike Marciniszyn77241052015-07-30 15:17:43 -04008846static int read_tx_settings(struct hfi1_devdata *dd,
8847 u8 *enable_lane_tx,
8848 u8 *tx_polarity_inversion,
8849 u8 *rx_polarity_inversion,
8850 u8 *max_rate)
8851{
8852 u32 frame;
8853 int ret;
8854
8855 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8856 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8857 & ENABLE_LANE_TX_MASK;
8858 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8859 & TX_POLARITY_INVERSION_MASK;
8860 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8861 & RX_POLARITY_INVERSION_MASK;
8862 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8863 return ret;
8864}
8865
8866static int write_tx_settings(struct hfi1_devdata *dd,
8867 u8 enable_lane_tx,
8868 u8 tx_polarity_inversion,
8869 u8 rx_polarity_inversion,
8870 u8 max_rate)
8871{
8872 u32 frame;
8873
8874 /* no need to mask, all variable sizes match field widths */
8875 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8876 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8877 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8878 | max_rate << MAX_RATE_SHIFT;
8879 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8880}
8881
Mike Marciniszyn77241052015-07-30 15:17:43 -04008882/*
8883 * Read an idle LCB message.
8884 *
8885 * Returns 0 on success, -EINVAL on error
8886 */
8887static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8888{
8889 int ret;
8890
Jubin John17fb4f22016-02-14 20:21:52 -08008891 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008892 if (ret != HCMD_SUCCESS) {
8893 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008894 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008895 return -EINVAL;
8896 }
8897 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8898 /* return only the payload as we already know the type */
8899 *data_out >>= IDLE_PAYLOAD_SHIFT;
8900 return 0;
8901}
8902
8903/*
8904 * Read an idle SMA message. To be done in response to a notification from
8905 * the 8051.
8906 *
8907 * Returns 0 on success, -EINVAL on error
8908 */
8909static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8910{
Jubin John17fb4f22016-02-14 20:21:52 -08008911 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8912 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008913}
8914
8915/*
8916 * Send an idle LCB message.
8917 *
8918 * Returns 0 on success, -EINVAL on error
8919 */
8920static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8921{
8922 int ret;
8923
8924 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8925 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8926 if (ret != HCMD_SUCCESS) {
8927 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008928 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008929 return -EINVAL;
8930 }
8931 return 0;
8932}
8933
8934/*
8935 * Send an idle SMA message.
8936 *
8937 * Returns 0 on success, -EINVAL on error
8938 */
8939int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8940{
8941 u64 data;
8942
Jubin John17fb4f22016-02-14 20:21:52 -08008943 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8944 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008945 return send_idle_message(dd, data);
8946}
8947
8948/*
8949 * Initialize the LCB then do a quick link up. This may or may not be
8950 * in loopback.
8951 *
8952 * return 0 on success, -errno on error
8953 */
8954static int do_quick_linkup(struct hfi1_devdata *dd)
8955{
Mike Marciniszyn77241052015-07-30 15:17:43 -04008956 int ret;
8957
8958 lcb_shutdown(dd, 0);
8959
8960 if (loopback) {
8961 /* LCB_CFG_LOOPBACK.VAL = 2 */
8962 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8963 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008964 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008965 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8966 }
8967
8968 /* start the LCBs */
8969 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8970 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8971
8972 /* simulator only loopback steps */
8973 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8974 /* LCB_CFG_RUN.EN = 1 */
8975 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008976 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008977
Dean Luickec8a1422017-03-20 17:24:39 -07008978 ret = wait_link_transfer_active(dd, 10);
8979 if (ret)
8980 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008981
8982 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008983 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008984 }
8985
8986 if (!loopback) {
8987 /*
8988 * When doing quick linkup and not in loopback, both
8989 * sides must be done with LCB set-up before either
8990 * starts the quick linkup. Put a delay here so that
8991 * both sides can be started and have a chance to be
8992 * done with LCB set up before resuming.
8993 */
8994 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008995 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008996 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008997 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008998 }
8999
9000 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9001 set_8051_lcb_access(dd);
9002
9003 /*
9004 * State "quick" LinkUp request sets the physical link state to
9005 * LinkUp without a verify capability sequence.
9006 * This state is in simulator v37 and later.
9007 */
9008 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9009 if (ret != HCMD_SUCCESS) {
9010 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009011 "%s: set physical link state to quick LinkUp failed with return %d\n",
9012 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009013
9014 set_host_lcb_access(dd);
9015 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9016
9017 if (ret >= 0)
9018 ret = -EINVAL;
9019 return ret;
9020 }
9021
9022 return 0; /* success */
9023}
9024
9025/*
9026 * Set the SerDes to internal loopback mode.
9027 * Returns 0 on success, -errno on error.
9028 */
9029static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9030{
9031 int ret;
9032
9033 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9034 if (ret == HCMD_SUCCESS)
9035 return 0;
9036 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009037 "Set physical link state to SerDes Loopback failed with return %d\n",
9038 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009039 if (ret >= 0)
9040 ret = -EINVAL;
9041 return ret;
9042}
9043
9044/*
9045 * Do all special steps to set up loopback.
9046 */
9047static int init_loopback(struct hfi1_devdata *dd)
9048{
9049 dd_dev_info(dd, "Entering loopback mode\n");
9050
9051 /* all loopbacks should disable self GUID check */
9052 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009053 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009054
9055 /*
9056 * The simulator has only one loopback option - LCB. Switch
9057 * to that option, which includes quick link up.
9058 *
9059 * Accept all valid loopback values.
9060 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009061 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9062 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9063 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009064 loopback = LOOPBACK_LCB;
9065 quick_linkup = 1;
9066 return 0;
9067 }
9068
9069 /* handle serdes loopback */
9070 if (loopback == LOOPBACK_SERDES) {
9071 /* internal serdes loopback needs quick linkup on RTL */
9072 if (dd->icode == ICODE_RTL_SILICON)
9073 quick_linkup = 1;
9074 return set_serdes_loopback_mode(dd);
9075 }
9076
9077 /* LCB loopback - handled at poll time */
9078 if (loopback == LOOPBACK_LCB) {
9079 quick_linkup = 1; /* LCB is always quick linkup */
9080
9081 /* not supported in emulation due to emulation RTL changes */
9082 if (dd->icode == ICODE_FPGA_EMULATION) {
9083 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009084 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009085 return -EINVAL;
9086 }
9087 return 0;
9088 }
9089
9090 /* external cable loopback requires no extra steps */
9091 if (loopback == LOOPBACK_CABLE)
9092 return 0;
9093
9094 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9095 return -EINVAL;
9096}
9097
9098/*
9099 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9100 * used in the Verify Capability link width attribute.
9101 */
9102static u16 opa_to_vc_link_widths(u16 opa_widths)
9103{
9104 int i;
9105 u16 result = 0;
9106
9107 static const struct link_bits {
9108 u16 from;
9109 u16 to;
9110 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009111 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9112 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9113 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9114 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009115 };
9116
9117 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9118 if (opa_widths & opa_link_xlate[i].from)
9119 result |= opa_link_xlate[i].to;
9120 }
9121 return result;
9122}
9123
9124/*
9125 * Set link attributes before moving to polling.
9126 */
9127static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9128{
9129 struct hfi1_devdata *dd = ppd->dd;
9130 u8 enable_lane_tx;
9131 u8 tx_polarity_inversion;
9132 u8 rx_polarity_inversion;
9133 int ret;
9134
9135 /* reset our fabric serdes to clear any lingering problems */
9136 fabric_serdes_reset(dd);
9137
9138 /* set the local tx rate - need to read-modify-write */
9139 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009140 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009141 if (ret)
9142 goto set_local_link_attributes_fail;
9143
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07009144 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009145 /* set the tx rate to the fastest enabled */
9146 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9147 ppd->local_tx_rate = 1;
9148 else
9149 ppd->local_tx_rate = 0;
9150 } else {
9151 /* set the tx rate to all enabled */
9152 ppd->local_tx_rate = 0;
9153 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9154 ppd->local_tx_rate |= 2;
9155 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9156 ppd->local_tx_rate |= 1;
9157 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009158
9159 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009160 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009161 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009162 if (ret != HCMD_SUCCESS)
9163 goto set_local_link_attributes_fail;
9164
9165 /*
9166 * DC supports continuous updates.
9167 */
Jubin John17fb4f22016-02-14 20:21:52 -08009168 ret = write_vc_local_phy(dd,
9169 0 /* no power management */,
9170 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009171 if (ret != HCMD_SUCCESS)
9172 goto set_local_link_attributes_fail;
9173
9174 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9175 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9176 ppd->port_crc_mode_enabled);
9177 if (ret != HCMD_SUCCESS)
9178 goto set_local_link_attributes_fail;
9179
9180 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009181 opa_to_vc_link_widths(
9182 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009183 if (ret != HCMD_SUCCESS)
9184 goto set_local_link_attributes_fail;
9185
9186 /* let peer know who we are */
9187 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9188 if (ret == HCMD_SUCCESS)
9189 return 0;
9190
9191set_local_link_attributes_fail:
9192 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009193 "Failed to set local link attributes, return 0x%x\n",
9194 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009195 return ret;
9196}
9197
9198/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009199 * Call this to start the link.
9200 * Do not do anything if the link is disabled.
9201 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009202 */
9203int start_link(struct hfi1_pportdata *ppd)
9204{
Dean Luick0db9dec2016-09-06 04:35:20 -07009205 /*
9206 * Tune the SerDes to a ballpark setting for optimal signal and bit
9207 * error rate. Needs to be done before starting the link.
9208 */
9209 tune_serdes(ppd);
9210
Mike Marciniszyn77241052015-07-30 15:17:43 -04009211 if (!ppd->link_enabled) {
9212 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009213 "%s: stopping link start because link is disabled\n",
9214 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009215 return 0;
9216 }
9217 if (!ppd->driver_link_ready) {
9218 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009219 "%s: stopping link start because driver is not ready\n",
9220 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009221 return 0;
9222 }
9223
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009224 /*
9225 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9226 * pkey table can be configured properly if the HFI unit is connected
9227 * to a switch port with MgmtAllowed=NO
9228 */
9229 clear_full_mgmt_pkey(ppd);
9230
Easwar Hariharan623bba22016-04-12 11:25:57 -07009231 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009232}
9233
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009234static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9235{
9236 struct hfi1_devdata *dd = ppd->dd;
9237 u64 mask;
9238 unsigned long timeout;
9239
9240 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009241 * Some QSFP cables have a quirk that asserts the IntN line as a side
9242 * effect of power up on plug-in. We ignore this false positive
9243 * interrupt until the module has finished powering up by waiting for
9244 * a minimum timeout of the module inrush initialization time of
9245 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9246 * module have stabilized.
9247 */
9248 msleep(500);
9249
9250 /*
9251 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009252 */
9253 timeout = jiffies + msecs_to_jiffies(2000);
9254 while (1) {
9255 mask = read_csr(dd, dd->hfi1_id ?
9256 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009257 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009258 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009259 if (time_after(jiffies, timeout)) {
9260 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9261 __func__);
9262 break;
9263 }
9264 udelay(2);
9265 }
9266}
9267
9268static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9269{
9270 struct hfi1_devdata *dd = ppd->dd;
9271 u64 mask;
9272
9273 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009274 if (enable) {
9275 /*
9276 * Clear the status register to avoid an immediate interrupt
9277 * when we re-enable the IntN pin
9278 */
9279 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9280 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009281 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009282 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009283 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009284 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009285 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9286}
9287
9288void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009289{
9290 struct hfi1_devdata *dd = ppd->dd;
9291 u64 mask, qsfp_mask;
9292
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009293 /* Disable INT_N from triggering QSFP interrupts */
9294 set_qsfp_int_n(ppd, 0);
9295
9296 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009297 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009298
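	/*
	 * RESET_N is active low: drive it low to put the module into reset,
	 * hold it briefly, then drive it high again to release the reset.
	 */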
9299 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009300 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009301 qsfp_mask &= ~mask;
9302 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009303 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009304
9305 udelay(10);
9306
9307 qsfp_mask |= mask;
9308 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009309 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009310
9311 wait_for_qsfp_init(ppd);
9312
9313 /*
9314 * Allow INT_N to trigger the QSFP interrupt to watch
9315 * for alarms and warnings
9316 */
9317 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009318}
9319
9320static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9321 u8 *qsfp_interrupt_status)
9322{
9323 struct hfi1_devdata *dd = ppd->dd;
9324
9325 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009326 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9327 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9328 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009329
9330 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009331 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9332 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9333 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009334
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009335 /*
9336 * The remaining alarms/warnings don't matter if the link is down.
9337 */
9338 if (ppd->host_link_state & HLS_DOWN)
9339 return 0;
9340
Mike Marciniszyn77241052015-07-30 15:17:43 -04009341 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009342 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9343 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9344 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009345
9346 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009347 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9348 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9349 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009350
9351 /* Byte 2 is vendor specific */
9352
9353 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009354 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9355 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9356 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009357
9358 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009359 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9360 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9361 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009362
9363 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009364 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9365 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9366 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009367
9368 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009369 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9370 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9371 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009372
9373 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009374 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9375 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9376 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009377
9378 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009379 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9380 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9381 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009382
9383 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009384 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9385 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9386 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009387
9388 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009389 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9390 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9391 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009392
9393 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009394 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9395 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9396 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009397
9398 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009399 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9400 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9401 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009402
9403 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009404 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9405 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9406 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009407
9408 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009409 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9410 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9411 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009412
9413 /* Bytes 9-10 and 11-12 are reserved */
9414 /* Bytes 13-15 are vendor specific */
9415
9416 return 0;
9417}
9418
Easwar Hariharan623bba22016-04-12 11:25:57 -07009419/* This routine will only be scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009420void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009421{
9422 struct qsfp_data *qd;
9423 struct hfi1_pportdata *ppd;
9424 struct hfi1_devdata *dd;
9425
9426 qd = container_of(work, struct qsfp_data, qsfp_work);
9427 ppd = qd->ppd;
9428 dd = ppd->dd;
9429
9430 /* Sanity check */
9431 if (!qsfp_mod_present(ppd))
9432 return;
9433
9434 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009435 * Turn DC back on after cable has been re-inserted. Up until
9436 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009437 */
9438 dc_start(dd);
9439
9440 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009441 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009442
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009443 wait_for_qsfp_init(ppd);
9444
9445 /*
9446 * Allow INT_N to trigger the QSFP interrupt to watch
9447 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009448 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009449 set_qsfp_int_n(ppd, 1);
9450
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009451 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009452 }
9453
9454 if (qd->check_interrupt_flags) {
9455 u8 qsfp_interrupt_status[16] = {0,};
9456
Dean Luick765a6fa2016-03-05 08:50:06 -08009457 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9458 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009459 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009460 "%s: Failed to read status of QSFP module\n",
9461 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009462 } else {
9463 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009464
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009465 handle_qsfp_error_conditions(
9466 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009467 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9468 ppd->qsfp_info.check_interrupt_flags = 0;
9469 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009470 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009471 }
9472 }
9473}
9474
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009475static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009476{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009477 struct hfi1_pportdata *ppd = dd->pport;
9478 u64 qsfp_mask, cce_int_mask;
9479 const int qsfp1_int_smask = QSFP1_INT % 64;
9480 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009481
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009482 /*
9483 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9484 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9485 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9486 * the index of the appropriate CSR in the CCEIntMask CSR array
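 * (CCE_INT_MASK is an array of 64-bit CSRs spaced 8 bytes apart, so
 * interrupt source bit S is found at CSR CCE_INT_MASK + 8 * (S / 64),
 * bit S % 64; that is exactly what the arithmetic below computes.)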
9487 */
9488 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9489 (8 * (QSFP1_INT / 64)));
9490 if (dd->hfi1_id) {
9491 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9492 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9493 cce_int_mask);
9494 } else {
9495 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9496 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9497 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009498 }
9499
Mike Marciniszyn77241052015-07-30 15:17:43 -04009500 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9501 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009502 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9503 qsfp_mask);
9504 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9505 qsfp_mask);
9506
9507 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009508
9509 /* Handle active low nature of INT_N and MODPRST_N pins */
9510 if (qsfp_mod_present(ppd))
9511 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9512 write_csr(dd,
9513 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9514 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009515}
9516
Dean Luickbbdeb332015-12-01 15:38:15 -05009517/*
9518 * Do a one-time initialize of the LCB block.
9519 */
9520static void init_lcb(struct hfi1_devdata *dd)
9521{
Dean Luicka59329d2016-02-03 14:32:31 -08009522 /* simulator does not correctly handle LCB cclk loopback, skip */
9523 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9524 return;
9525
Dean Luickbbdeb332015-12-01 15:38:15 -05009526 /* the DC has been reset earlier in the driver load */
9527
9528 /* set LCB for cclk loopback on the port */
9529 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9530 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9531 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9532 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9533 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9534 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9535 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9536}
9537
Dean Luick673b9752016-08-31 07:24:33 -07009538/*
9539 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9540 * on error.
9541 */
9542static int test_qsfp_read(struct hfi1_pportdata *ppd)
9543{
9544 int ret;
9545 u8 status;
9546
Easwar Hariharanfb897ad2017-03-20 17:25:42 -07009547 /*
9548 * Report success if the port is not a QSFP, or if it is a QSFP but the
9549 * cable is not present
9550 */
9551 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
Dean Luick673b9752016-08-31 07:24:33 -07009552 return 0;
9553
9554 /* read byte 2, the status byte */
9555 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9556 if (ret < 0)
9557 return ret;
9558 if (ret != 1)
9559 return -EIO;
9560
9561 return 0; /* success */
9562}
9563
9564/*
9565 * Values for QSFP retry.
9566 *
9567 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9568 * arrived at from experience on a large cluster.
9569 */
9570#define MAX_QSFP_RETRIES 20
9571#define QSFP_RETRY_WAIT 500 /* msec */
9572
9573/*
9574 * Try a QSFP read. If it fails, schedule a retry for later.
9575 * Called on first link activation after driver load.
9576 */
9577static void try_start_link(struct hfi1_pportdata *ppd)
9578{
9579 if (test_qsfp_read(ppd)) {
9580 /* read failed */
9581 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9582 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9583 return;
9584 }
9585 dd_dev_info(ppd->dd,
9586 "QSFP not responding, waiting and retrying %d\n",
9587 (int)ppd->qsfp_retry_count);
9588 ppd->qsfp_retry_count++;
9589 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9590 msecs_to_jiffies(QSFP_RETRY_WAIT));
9591 return;
9592 }
9593 ppd->qsfp_retry_count = 0;
9594
Dean Luick673b9752016-08-31 07:24:33 -07009595 start_link(ppd);
9596}
9597
9598/*
9599 * Workqueue function to start the link after a delay.
9600 */
9601void handle_start_link(struct work_struct *work)
9602{
9603 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9604 start_link_work.work);
9605 try_start_link(ppd);
9606}
9607
Mike Marciniszyn77241052015-07-30 15:17:43 -04009608int bringup_serdes(struct hfi1_pportdata *ppd)
9609{
9610 struct hfi1_devdata *dd = ppd->dd;
9611 u64 guid;
9612 int ret;
9613
9614 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9615 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9616
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009617 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
Mike Marciniszyn77241052015-07-30 15:17:43 -04009618 if (!guid) {
9619 if (dd->base_guid)
9620 guid = dd->base_guid + ppd->port - 1;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009621 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009622 }
9623
Mike Marciniszyn77241052015-07-30 15:17:43 -04009624 /* Set linkinit_reason on power up per OPA spec */
9625 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9626
Dean Luickbbdeb332015-12-01 15:38:15 -05009627 /* one-time init of the LCB */
9628 init_lcb(dd);
9629
Mike Marciniszyn77241052015-07-30 15:17:43 -04009630 if (loopback) {
9631 ret = init_loopback(dd);
9632 if (ret < 0)
9633 return ret;
9634 }
9635
Easwar Hariharan9775a992016-05-12 10:22:39 -07009636 get_port_type(ppd);
9637 if (ppd->port_type == PORT_TYPE_QSFP) {
9638 set_qsfp_int_n(ppd, 0);
9639 wait_for_qsfp_init(ppd);
9640 set_qsfp_int_n(ppd, 1);
9641 }
9642
Dean Luick673b9752016-08-31 07:24:33 -07009643 try_start_link(ppd);
9644 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009645}
9646
9647void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9648{
9649 struct hfi1_devdata *dd = ppd->dd;
9650
9651 /*
9652 * Shut down the link and keep it down. First clear the flag that says
9653 * the driver wants to allow the link to be up (driver_link_ready).
9654 * Then make sure the link is not automatically restarted
9655 * (link_enabled). Cancel any pending restart. And finally
9656 * go offline.
9657 */
9658 ppd->driver_link_ready = 0;
9659 ppd->link_enabled = 0;
9660
Dean Luick673b9752016-08-31 07:24:33 -07009661 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9662 flush_delayed_work(&ppd->start_link_work);
9663 cancel_delayed_work_sync(&ppd->start_link_work);
9664
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009665 ppd->offline_disabled_reason =
9666 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009667 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009668 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009669 set_link_state(ppd, HLS_DN_OFFLINE);
9670
9671 /* disable the port */
9672 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9673}
9674
9675static inline int init_cpu_counters(struct hfi1_devdata *dd)
9676{
9677 struct hfi1_pportdata *ppd;
9678 int i;
9679
9680 ppd = (struct hfi1_pportdata *)(dd + 1);
9681 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009682 ppd->ibport_data.rvp.rc_acks = NULL;
9683 ppd->ibport_data.rvp.rc_qacks = NULL;
9684 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9685 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9686 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9687 if (!ppd->ibport_data.rvp.rc_acks ||
9688 !ppd->ibport_data.rvp.rc_delayed_comp ||
9689 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009690 return -ENOMEM;
9691 }
9692
9693 return 0;
9694}
9695
9696static const char * const pt_names[] = {
9697 "expected",
9698 "eager",
9699 "invalid"
9700};
9701
9702static const char *pt_name(u32 type)
9703{
9704 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9705}
9706
9707/*
9708 * index is the index into the receive array
9709 */
9710void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9711 u32 type, unsigned long pa, u16 order)
9712{
9713 u64 reg;
9714 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9715 (dd->kregbase + RCV_ARRAY));
9716
9717 if (!(dd->flags & HFI1_PRESENT))
9718 goto done;
9719
9720 if (type == PT_INVALID) {
9721 pa = 0;
9722 } else if (type > PT_INVALID) {
9723 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009724 "unexpected receive array type %u for index %u, not handled\n",
9725 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009726 goto done;
9727 }
9728
9729 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9730 pt_name(type), index, pa, (unsigned long)order);
9731
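	/*
	 * Build the RcvArray entry: a write-enable bit, the buffer size
	 * (order) field, and the 4 KB-aligned physical address shifted
	 * into the address field, then write it to the chip at
	 * RCV_ARRAY + index * 8.
	 */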
9732#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9733 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9734 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9735 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9736 << RCV_ARRAY_RT_ADDR_SHIFT;
9737 writeq(reg, base + (index * 8));
9738
9739 if (type == PT_EAGER)
9740 /*
9741 * Eager entries are written one-by-one so we have to push them
9742 * after we write the entry.
9743 */
9744 flush_wc();
9745done:
9746 return;
9747}
9748
9749void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9750{
9751 struct hfi1_devdata *dd = rcd->dd;
9752 u32 i;
9753
9754 /* this could be optimized */
9755 for (i = rcd->eager_base; i < rcd->eager_base +
9756 rcd->egrbufs.alloced; i++)
9757 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9758
9759 for (i = rcd->expected_base;
9760 i < rcd->expected_base + rcd->expected_count; i++)
9761 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9762}
9763
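/*
 * Given a pointer to a packet's RHF, return a pointer to the packet's
 * IB header, which lives in the same receive header queue entry at the
 * offset recorded in the RHF.
 */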
Mike Marciniszyn261a4352016-09-06 04:35:05 -07009764struct ib_header *hfi1_get_msgheader(
9765 struct hfi1_devdata *dd, __le32 *rhf_addr)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009766{
9767 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9768
Mike Marciniszyn261a4352016-09-06 04:35:05 -07009769 return (struct ib_header *)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009770 (rhf_addr - dd->rhf_offset + offset);
9771}
9772
9773static const char * const ib_cfg_name_strings[] = {
9774 "HFI1_IB_CFG_LIDLMC",
9775 "HFI1_IB_CFG_LWID_DG_ENB",
9776 "HFI1_IB_CFG_LWID_ENB",
9777 "HFI1_IB_CFG_LWID",
9778 "HFI1_IB_CFG_SPD_ENB",
9779 "HFI1_IB_CFG_SPD",
9780 "HFI1_IB_CFG_RXPOL_ENB",
9781 "HFI1_IB_CFG_LREV_ENB",
9782 "HFI1_IB_CFG_LINKLATENCY",
9783 "HFI1_IB_CFG_HRTBT",
9784 "HFI1_IB_CFG_OP_VLS",
9785 "HFI1_IB_CFG_VL_HIGH_CAP",
9786 "HFI1_IB_CFG_VL_LOW_CAP",
9787 "HFI1_IB_CFG_OVERRUN_THRESH",
9788 "HFI1_IB_CFG_PHYERR_THRESH",
9789 "HFI1_IB_CFG_LINKDEFAULT",
9790 "HFI1_IB_CFG_PKEYS",
9791 "HFI1_IB_CFG_MTU",
9792 "HFI1_IB_CFG_LSTATE",
9793 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9794 "HFI1_IB_CFG_PMA_TICKS",
9795 "HFI1_IB_CFG_PORT"
9796};
9797
9798static const char *ib_cfg_name(int which)
9799{
9800 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9801 return "invalid";
9802 return ib_cfg_name_strings[which];
9803}
9804
9805int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9806{
9807 struct hfi1_devdata *dd = ppd->dd;
9808 int val = 0;
9809
9810 switch (which) {
9811 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9812 val = ppd->link_width_enabled;
9813 break;
9814 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9815 val = ppd->link_width_active;
9816 break;
9817 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9818 val = ppd->link_speed_enabled;
9819 break;
9820 case HFI1_IB_CFG_SPD: /* current Link speed */
9821 val = ppd->link_speed_active;
9822 break;
9823
9824 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9825 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9826 case HFI1_IB_CFG_LINKLATENCY:
9827 goto unimplemented;
9828
9829 case HFI1_IB_CFG_OP_VLS:
9830 val = ppd->vls_operational;
9831 break;
9832 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9833 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9834 break;
9835 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9836 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9837 break;
9838 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9839 val = ppd->overrun_threshold;
9840 break;
9841 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9842 val = ppd->phy_error_threshold;
9843 break;
9844 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9845 val = dd->link_default;
9846 break;
9847
9848 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9849 case HFI1_IB_CFG_PMA_TICKS:
9850 default:
9851unimplemented:
9852 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9853 dd_dev_info(
9854 dd,
9855 "%s: which %s: not implemented\n",
9856 __func__,
9857 ib_cfg_name(which));
9858 break;
9859 }
9860
9861 return val;
9862}
9863
9864/*
9865 * The largest MAD packet size.
9866 */
9867#define MAX_MAD_PACKET 2048
9868
9869/*
9870 * Return the maximum header bytes that can go on the _wire_
9871 * for this device. This count includes the ICRC which is
9872 * not part of the packet held in memory but is appended
9873 * by the HW.
9874 * This is dependent on the device's receive header entry size.
9875 * HFI allows this to be set per-receive context, but the
9876 * driver presently enforces a global value.
9877 */
9878u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9879{
9880 /*
9881 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9882 * the Receive Header Entry Size minus the PBC (or RHF) size
9883 * plus one DW for the ICRC appended by HW.
9884 *
9885 * dd->rcd[0]->rcvhdrqentsize is in DW.
9886 * We use rcd[0] as all contexts will have the same value. Also,
9887 * the first kernel context would have been allocated by now so
9888 * we are guaranteed a valid value.
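 * For example, with a (hypothetical) entry size of 32 DW this works
 * out to (32 - 2 + 1) * 4 = 124 bytes of wire header.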
9889 */
9890 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9891}
9892
9893/*
9894 * Set Send Length
9895 * @ppd - per port data
9896 *
9897 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9898 * registers compare against LRH.PktLen, so use the max bytes included
9899 * in the LRH.
9900 *
9901 * This routine changes all VL values except VL15, which it maintains at
9902 * the same value.
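 *
 * Rough sketch of the encoding, as derived from the code below: each
 * per-VL SendLenCheck field holds (VL MTU + max header bytes) / 4,
 * i.e. the largest legal LRH.PktLen, in DWs, for that VL.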
9903 */
9904static void set_send_length(struct hfi1_pportdata *ppd)
9905{
9906 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009907 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9908 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009909 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9910 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9911 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009912 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009913 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009914
9915 for (i = 0; i < ppd->vls_supported; i++) {
9916 if (dd->vld[i].mtu > maxvlmtu)
9917 maxvlmtu = dd->vld[i].mtu;
9918 if (i <= 3)
9919 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9920 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9921 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9922 else
9923 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9924 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9925 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9926 }
9927 write_csr(dd, SEND_LEN_CHECK0, len1);
9928 write_csr(dd, SEND_LEN_CHECK1, len2);
9929 /* adjust kernel credit return thresholds based on new MTUs */
9930 /* all kernel receive contexts have the same hdrqentsize */
9931 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009932 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9933 sc_mtu_to_threshold(dd->vld[i].sc,
9934 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009935 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009936 for (j = 0; j < INIT_SC_PER_VL; j++)
9937 sc_set_cr_threshold(
9938 pio_select_send_context_vl(dd, j, i),
9939 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009940 }
9941 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9942 sc_mtu_to_threshold(dd->vld[15].sc,
9943 dd->vld[15].mtu,
9944 dd->rcd[0]->rcvhdrqentsize));
9945 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009946
9947 /* Adjust maximum MTU for the port in DC */
9948 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9949 (ilog2(maxvlmtu >> 8) + 1);
9950 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9951 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9952 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9953 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9954 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9955}
9956
9957static void set_lidlmc(struct hfi1_pportdata *ppd)
9958{
9959 int i;
9960 u64 sreg = 0;
9961 struct hfi1_devdata *dd = ppd->dd;
9962 u32 mask = ~((1U << ppd->lmc) - 1);
9963 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9964
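	/*
	 * Example: an LMC of 2 gives mask ~0x3, so the low two LID bits are
	 * wildcarded in the DLID/SLID checks below and the port answers to
	 * four consecutive LIDs.
	 */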
Mike Marciniszyn77241052015-07-30 15:17:43 -04009965 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9966 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9967 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009968 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009969 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9970 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9971 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9972
9973 /*
9974 * Iterate over all the send contexts and set their SLID check
9975 */
9976 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9977 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9978 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9979 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9980
9981 for (i = 0; i < dd->chip_send_contexts; i++) {
9982 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9983 i, (u32)sreg);
9984 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9985 }
9986
9987 /* Now we have to do the same thing for the sdma engines */
9988 sdma_update_lmc(dd, mask, ppd->lid);
9989}
9990
9991static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9992{
9993 unsigned long timeout;
9994 u32 curr_state;
9995
9996 timeout = jiffies + msecs_to_jiffies(msecs);
9997 while (1) {
9998 curr_state = read_physical_state(dd);
9999 if (curr_state == state)
10000 break;
10001 if (time_after(jiffies, timeout)) {
10002 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010003 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
10004 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010005 return -ETIMEDOUT;
10006 }
10007 usleep_range(1950, 2050); /* sleep 2ms-ish */
10008 }
10009
10010 return 0;
10011}
10012
Dean Luick6854c692016-07-25 13:38:56 -070010013static const char *state_completed_string(u32 completed)
10014{
10015 static const char * const state_completed[] = {
10016 "EstablishComm",
10017 "OptimizeEQ",
10018 "VerifyCap"
10019 };
10020
10021 if (completed < ARRAY_SIZE(state_completed))
10022 return state_completed[completed];
10023
10024 return "unknown";
10025}
10026
10027static const char all_lanes_dead_timeout_expired[] =
10028 "All lanes were inactive – was the interconnect media removed?";
10029static const char tx_out_of_policy[] =
10030 "Passing lanes on local port do not meet the local link width policy";
10031static const char no_state_complete[] =
10032 "State timeout occurred before link partner completed the state";
10033static const char * const state_complete_reasons[] = {
10034 [0x00] = "Reason unknown",
10035 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10036 [0x02] = "Link partner reported failure",
10037 [0x10] = "Unable to achieve frame sync on any lane",
10038 [0x11] =
10039 "Unable to find a common bit rate with the link partner",
10040 [0x12] =
10041 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10042 [0x13] =
10043 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10044 [0x14] = no_state_complete,
10045 [0x15] =
10046 "State timeout occurred before link partner identified equalization presets",
10047 [0x16] =
10048 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10049 [0x17] = tx_out_of_policy,
10050 [0x20] = all_lanes_dead_timeout_expired,
10051 [0x21] =
10052 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10053 [0x22] = no_state_complete,
10054 [0x23] =
10055 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10056 [0x24] = tx_out_of_policy,
10057 [0x30] = all_lanes_dead_timeout_expired,
10058 [0x31] =
10059 "State timeout occurred waiting for host to process received frames",
10060 [0x32] = no_state_complete,
10061 [0x33] =
10062 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10063 [0x34] = tx_out_of_policy,
10064};
10065
10066static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10067 u32 code)
10068{
10069 const char *str = NULL;
10070
10071 if (code < ARRAY_SIZE(state_complete_reasons))
10072 str = state_complete_reasons[code];
10073
10074 if (str)
10075 return str;
10076 return "Reserved";
10077}
10078
10079/* describe the given last state complete frame */
10080static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10081 const char *prefix)
10082{
10083 struct hfi1_devdata *dd = ppd->dd;
10084 u32 success;
10085 u32 state;
10086 u32 reason;
10087 u32 lanes;
10088
10089 /*
10090 * Decode frame:
10091 * [ 0: 0] - success
10092 * [ 3: 1] - state
10093 * [ 7: 4] - next state timeout
10094 * [15: 8] - reason code
10095 * [31:16] - lanes
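 *
 * Worked example (made-up frame value): 0x00030224 decodes as
 * lanes 0x0003, reason 0x02 (link partner reported failure),
 * state VerifyCap, success 0 (the state did not complete).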
10096 */
10097 success = frame & 0x1;
10098 state = (frame >> 1) & 0x7;
10099 reason = (frame >> 8) & 0xff;
10100 lanes = (frame >> 16) & 0xffff;
10101
10102 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10103 prefix, frame);
10104 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10105 state_completed_string(state), state);
10106 dd_dev_err(dd, " state successfully completed: %s\n",
10107 success ? "yes" : "no");
10108 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10109 reason, state_complete_reason_code_string(ppd, reason));
10110 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10111}
10112
10113/*
10114 * Read the last state complete frames and explain them. This routine
10115 * expects to be called if the link went down during link negotiation
10116 * and initialization (LNI). That is, anywhere between polling and link up.
10117 */
10118static void check_lni_states(struct hfi1_pportdata *ppd)
10119{
10120 u32 last_local_state;
10121 u32 last_remote_state;
10122
10123 read_last_local_state(ppd->dd, &last_local_state);
10124 read_last_remote_state(ppd->dd, &last_remote_state);
10125
10126 /*
10127 * Don't report anything if there is nothing to report. A value of
10128 * 0 means the link was taken down while polling and there was no
10129 * training in-process.
10130 */
10131 if (last_local_state == 0 && last_remote_state == 0)
10132 return;
10133
10134 decode_state_complete(ppd, last_local_state, "transmitted");
10135 decode_state_complete(ppd, last_remote_state, "received");
10136}
10137
Dean Luickec8a1422017-03-20 17:24:39 -070010138/* wait up to wait_ms milliseconds for LINK_TRANSFER_ACTIVE to go to 1 */
10139static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10140{
10141 u64 reg;
10142 unsigned long timeout;
10143
10144 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10145 timeout = jiffies + msecs_to_jiffies(wait_ms);
10146 while (1) {
10147 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10148 if (reg)
10149 break;
10150 if (time_after(jiffies, timeout)) {
10151 dd_dev_err(dd,
10152 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10153 return -ETIMEDOUT;
10154 }
10155 udelay(2);
10156 }
10157 return 0;
10158}
10159
10160/* called when the logical link state is not down as it should be */
10161static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10162{
10163 struct hfi1_devdata *dd = ppd->dd;
10164
10165 /*
10166 * Bring link up in LCB loopback
10167 */
10168 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10169 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10170 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10171
10172 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10173 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10174 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10175 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10176
10177 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10178 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10179 udelay(3);
10180 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10181 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10182
10183 wait_link_transfer_active(dd, 100);
10184
10185 /*
10186 * Bring the link down again.
10187 */
10188 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10189 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10190 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10191
10192 /* call again to adjust ppd->statusp, if needed */
10193 get_logical_state(ppd);
10194}
10195
Mike Marciniszyn77241052015-07-30 15:17:43 -040010196/*
10197 * Helper for set_link_state(). Do not call except from that routine.
10198 * Expects ppd->hls_mutex to be held.
10199 *
10200 * @rem_reason value to be sent to the neighbor
10201 *
10202 * LinkDownReasons only set if transition succeeds.
10203 */
10204static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10205{
10206 struct hfi1_devdata *dd = ppd->dd;
10207 u32 pstate, previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010208 int ret;
10209 int do_transition;
10210 int do_wait;
10211
Michael J. Ruhl86884262017-03-20 17:24:51 -070010212 update_lcb_cache(dd);
10213
Mike Marciniszyn77241052015-07-30 15:17:43 -040010214 previous_state = ppd->host_link_state;
10215 ppd->host_link_state = HLS_GOING_OFFLINE;
10216 pstate = read_physical_state(dd);
10217 if (pstate == PLS_OFFLINE) {
10218 do_transition = 0; /* in right state */
10219 do_wait = 0; /* ...no need to wait */
10220 } else if ((pstate & 0xff) == PLS_OFFLINE) {
10221 do_transition = 0; /* in an offline transient state */
10222 do_wait = 1; /* ...wait for it to settle */
10223 } else {
10224 do_transition = 1; /* need to move to offline */
10225 do_wait = 1; /* ...will need to wait */
10226 }
10227
10228 if (do_transition) {
10229 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -080010230 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010231
10232 if (ret != HCMD_SUCCESS) {
10233 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010234 "Failed to transition to Offline link state, return %d\n",
10235 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010236 return -EINVAL;
10237 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010238 if (ppd->offline_disabled_reason ==
10239 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010240 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -080010241 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010242 }
10243
10244 if (do_wait) {
10245 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -040010246 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010247 if (ret < 0)
10248 return ret;
10249 }
10250
Mike Marciniszyn77241052015-07-30 15:17:43 -040010251 /*
10252 * Now in charge of LCB - must be after the physical state is
10253 * offline.quiet and before host_link_state is changed.
10254 */
10255 set_host_lcb_access(dd);
10256 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
Dean Luickec8a1422017-03-20 17:24:39 -070010257
10258 /* make sure the logical state is also down */
10259 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10260 if (ret)
10261 force_logical_link_state_down(ppd);
10262
Mike Marciniszyn77241052015-07-30 15:17:43 -040010263 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10264
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010265 if (ppd->port_type == PORT_TYPE_QSFP &&
10266 ppd->qsfp_info.limiting_active &&
10267 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010268 int ret;
10269
10270 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10271 if (ret == 0) {
10272 set_qsfp_tx(ppd, 0);
10273 release_chip_resource(dd, qsfp_resource(dd));
10274 } else {
10275 /* not fatal, but should warn */
10276 dd_dev_err(dd,
10277 "Unable to acquire lock to turn off QSFP TX\n");
10278 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010279 }
10280
Mike Marciniszyn77241052015-07-30 15:17:43 -040010281 /*
10282 * The LNI has a mandatory wait time after the physical state
10283 * moves to Offline.Quiet. The wait time may be different
10284 * depending on how the link went down. The 8051 firmware
10285 * will observe the needed wait time and only move to ready
10286 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010287 * is 6s, so wait that long and then at least 0.5s more for
10288 * other transitions, and another 0.5s for a buffer.
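 * (Hence the 7000 ms argument to wait_fm_ready() below:
 * 6000 + 500 + 500.)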
Mike Marciniszyn77241052015-07-30 15:17:43 -040010289 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010290 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010291 if (ret) {
10292 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010293 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010294 /* state is really offline, so make it so */
10295 ppd->host_link_state = HLS_DN_OFFLINE;
10296 return ret;
10297 }
10298
10299 /*
10300 * The state is now offline and the 8051 is ready to accept host
10301 * requests.
10302 * - change our state
10303 * - notify others if we were previously in a linkup state
10304 */
10305 ppd->host_link_state = HLS_DN_OFFLINE;
10306 if (previous_state & HLS_UP) {
10307 /* went down while link was up */
10308 handle_linkup_change(dd, 0);
10309 } else if (previous_state
10310 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10311 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010312 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010313 }
10314
10315 /* the active link width (downgrade) is 0 on link down */
10316 ppd->link_width_active = 0;
10317 ppd->link_width_downgrade_tx_active = 0;
10318 ppd->link_width_downgrade_rx_active = 0;
10319 ppd->current_egress_rate = 0;
10320 return 0;
10321}
10322
10323/* return the link state name */
10324static const char *link_state_name(u32 state)
10325{
10326 const char *name;
10327 int n = ilog2(state);
10328 static const char * const names[] = {
10329 [__HLS_UP_INIT_BP] = "INIT",
10330 [__HLS_UP_ARMED_BP] = "ARMED",
10331 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10332 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10333 [__HLS_DN_POLL_BP] = "POLL",
10334 [__HLS_DN_DISABLE_BP] = "DISABLE",
10335 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10336 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10337 [__HLS_GOING_UP_BP] = "GOING_UP",
10338 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10339 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10340 };
10341
10342 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10343 return name ? name : "unknown";
10344}
10345
10346/* return the link state reason name */
10347static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10348{
10349 if (state == HLS_UP_INIT) {
10350 switch (ppd->linkinit_reason) {
10351 case OPA_LINKINIT_REASON_LINKUP:
10352 return "(LINKUP)";
10353 case OPA_LINKINIT_REASON_FLAPPING:
10354 return "(FLAPPING)";
10355 case OPA_LINKINIT_OUTSIDE_POLICY:
10356 return "(OUTSIDE_POLICY)";
10357 case OPA_LINKINIT_QUARANTINED:
10358 return "(QUARANTINED)";
10359 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10360 return "(INSUFIC_CAPABILITY)";
10361 default:
10362 break;
10363 }
10364 }
10365 return "";
10366}
10367
10368/*
10369 * driver_physical_state - convert the driver's notion of a port's
10370 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10371 * Return -1 (converted to a u32) to indicate error.
10372 */
10373u32 driver_physical_state(struct hfi1_pportdata *ppd)
10374{
10375 switch (ppd->host_link_state) {
10376 case HLS_UP_INIT:
10377 case HLS_UP_ARMED:
10378 case HLS_UP_ACTIVE:
10379 return IB_PORTPHYSSTATE_LINKUP;
10380 case HLS_DN_POLL:
10381 return IB_PORTPHYSSTATE_POLLING;
10382 case HLS_DN_DISABLE:
10383 return IB_PORTPHYSSTATE_DISABLED;
10384 case HLS_DN_OFFLINE:
10385 return OPA_PORTPHYSSTATE_OFFLINE;
10386 case HLS_VERIFY_CAP:
10387 return IB_PORTPHYSSTATE_POLLING;
10388 case HLS_GOING_UP:
10389 return IB_PORTPHYSSTATE_POLLING;
10390 case HLS_GOING_OFFLINE:
10391 return OPA_PORTPHYSSTATE_OFFLINE;
10392 case HLS_LINK_COOLDOWN:
10393 return OPA_PORTPHYSSTATE_OFFLINE;
10394 case HLS_DN_DOWNDEF:
10395 default:
10396 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10397 ppd->host_link_state);
10398 return -1;
10399 }
10400}
10401
10402/*
10403 * driver_logical_state - convert the driver's notion of a port's
10404 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10405 * (converted to a u32) to indicate error.
10406 */
10407u32 driver_logical_state(struct hfi1_pportdata *ppd)
10408{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010409 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010410 return IB_PORT_DOWN;
10411
10412 switch (ppd->host_link_state & HLS_UP) {
10413 case HLS_UP_INIT:
10414 return IB_PORT_INIT;
10415 case HLS_UP_ARMED:
10416 return IB_PORT_ARMED;
10417 case HLS_UP_ACTIVE:
10418 return IB_PORT_ACTIVE;
10419 default:
10420 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10421 ppd->host_link_state);
10422 return -1;
10423 }
10424}
10425
10426void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10427 u8 neigh_reason, u8 rem_reason)
10428{
10429 if (ppd->local_link_down_reason.latest == 0 &&
10430 ppd->neigh_link_down_reason.latest == 0) {
10431 ppd->local_link_down_reason.latest = lcl_reason;
10432 ppd->neigh_link_down_reason.latest = neigh_reason;
10433 ppd->remote_link_down_reason = rem_reason;
10434 }
10435}
10436
10437/*
10438 * Change the physical and/or logical link state.
10439 *
10440 * Do not call this routine while inside an interrupt. It contains
10441 * calls to routines that can take multiple seconds to finish.
10442 *
10443 * Returns 0 on success, -errno on failure.
10444 */
10445int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10446{
10447 struct hfi1_devdata *dd = ppd->dd;
10448 struct ib_event event = {.device = NULL};
10449 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010450 int orig_new_state, poll_bounce;
10451
10452 mutex_lock(&ppd->hls_lock);
10453
10454 orig_new_state = state;
10455 if (state == HLS_DN_DOWNDEF)
10456 state = dd->link_default;
10457
10458 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010459 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10460 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010461
10462 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010463 link_state_name(ppd->host_link_state),
10464 link_state_name(orig_new_state),
10465 poll_bounce ? "(bounce) " : "",
10466 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010467
Mike Marciniszyn77241052015-07-30 15:17:43 -040010468 /*
10469 * If we're going to a (HLS_*) link state that implies the logical
10470 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10471 * reset is_sm_config_started to 0.
10472 */
10473 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10474 ppd->is_sm_config_started = 0;
10475
10476 /*
10477 * Do nothing if the states match. Let a poll to poll link bounce
10478 * go through.
10479 */
10480 if (ppd->host_link_state == state && !poll_bounce)
10481 goto done;
10482
10483 switch (state) {
10484 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010485 if (ppd->host_link_state == HLS_DN_POLL &&
10486 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010487 /*
10488 * Quick link up jumps from polling to here.
10489 *
10490 * Whether in normal or loopback mode, the
10491 * simulator jumps from polling to link up.
10492 * Accept that here.
10493 */
Jubin John17fb4f22016-02-14 20:21:52 -080010494 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010495 } else if (ppd->host_link_state != HLS_GOING_UP) {
10496 goto unexpected;
10497 }
10498
10499 ppd->host_link_state = HLS_UP_INIT;
10500 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10501 if (ret) {
10502 /* logical state didn't change, stay at going_up */
10503 ppd->host_link_state = HLS_GOING_UP;
10504 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010505 "%s: logical state did not change to INIT\n",
10506 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010507 } else {
10508 /* clear old transient LINKINIT_REASON code */
10509 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10510 ppd->linkinit_reason =
10511 OPA_LINKINIT_REASON_LINKUP;
10512
10513 /* enable the port */
10514 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10515
10516 handle_linkup_change(dd, 1);
10517 }
10518 break;
10519 case HLS_UP_ARMED:
10520 if (ppd->host_link_state != HLS_UP_INIT)
10521 goto unexpected;
10522
10523 ppd->host_link_state = HLS_UP_ARMED;
10524 set_logical_state(dd, LSTATE_ARMED);
10525 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10526 if (ret) {
10527 /* logical state didn't change, stay at init */
10528 ppd->host_link_state = HLS_UP_INIT;
10529 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010530 "%s: logical state did not change to ARMED\n",
10531 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010532 }
10533 /*
10534 * The simulator does not currently implement SMA messages,
10535 * so neighbor_normal is not set. Set it here when we first
10536 * move to Armed.
10537 */
10538 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10539 ppd->neighbor_normal = 1;
10540 break;
10541 case HLS_UP_ACTIVE:
10542 if (ppd->host_link_state != HLS_UP_ARMED)
10543 goto unexpected;
10544
10545 ppd->host_link_state = HLS_UP_ACTIVE;
10546 set_logical_state(dd, LSTATE_ACTIVE);
10547 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10548 if (ret) {
10549 /* logical state didn't change, stay at armed */
10550 ppd->host_link_state = HLS_UP_ARMED;
10551 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010552 "%s: logical state did not change to ACTIVE\n",
10553 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010554 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010555 /* tell all engines to go running */
10556 sdma_all_running(dd);
10557
10558 /* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010559 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010560 event.element.port_num = ppd->port;
10561 event.event = IB_EVENT_PORT_ACTIVE;
10562 }
10563 break;
10564 case HLS_DN_POLL:
10565 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10566 ppd->host_link_state == HLS_DN_OFFLINE) &&
10567 dd->dc_shutdown)
10568 dc_start(dd);
10569 /* Hand LED control to the DC */
10570 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10571
10572 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10573 u8 tmp = ppd->link_enabled;
10574
10575 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10576 if (ret) {
10577 ppd->link_enabled = tmp;
10578 break;
10579 }
10580 ppd->remote_link_down_reason = 0;
10581
10582 if (ppd->driver_link_ready)
10583 ppd->link_enabled = 1;
10584 }
10585
Jim Snowfb9036d2016-01-11 18:32:21 -050010586 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010587 ret = set_local_link_attributes(ppd);
10588 if (ret)
10589 break;
10590
10591 ppd->port_error_action = 0;
10592 ppd->host_link_state = HLS_DN_POLL;
10593
10594 if (quick_linkup) {
10595 /* quick linkup does not go into polling */
10596 ret = do_quick_linkup(dd);
10597 } else {
10598 ret1 = set_physical_link_state(dd, PLS_POLLING);
10599 if (ret1 != HCMD_SUCCESS) {
10600 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010601 "Failed to transition to Polling link state, return 0x%x\n",
10602 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010603 ret = -EINVAL;
10604 }
10605 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010606 ppd->offline_disabled_reason =
10607 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010608 /*
10609 * If an error occurred above, go back to offline. The
10610 * caller may reschedule another attempt.
10611 */
10612 if (ret)
10613 goto_offline(ppd, 0);
10614 break;
10615 case HLS_DN_DISABLE:
10616 /* link is disabled */
10617 ppd->link_enabled = 0;
10618
10619 /* allow any state to transition to disabled */
10620
10621 /* must transition to offline first */
10622 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10623 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10624 if (ret)
10625 break;
10626 ppd->remote_link_down_reason = 0;
10627 }
10628
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010629 if (!dd->dc_shutdown) {
10630 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10631 if (ret1 != HCMD_SUCCESS) {
10632 dd_dev_err(dd,
10633 "Failed to transition to Disabled link state, return 0x%x\n",
10634 ret1);
10635 ret = -EINVAL;
10636 break;
10637 }
10638 dc_shutdown(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010639 }
10640 ppd->host_link_state = HLS_DN_DISABLE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010641 break;
10642 case HLS_DN_OFFLINE:
10643 if (ppd->host_link_state == HLS_DN_DISABLE)
10644 dc_start(dd);
10645
10646 /* allow any state to transition to offline */
10647 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10648 if (!ret)
10649 ppd->remote_link_down_reason = 0;
10650 break;
10651 case HLS_VERIFY_CAP:
10652 if (ppd->host_link_state != HLS_DN_POLL)
10653 goto unexpected;
10654 ppd->host_link_state = HLS_VERIFY_CAP;
10655 break;
10656 case HLS_GOING_UP:
10657 if (ppd->host_link_state != HLS_VERIFY_CAP)
10658 goto unexpected;
10659
10660 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10661 if (ret1 != HCMD_SUCCESS) {
10662 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010663 "Failed to transition to link up state, return 0x%x\n",
10664 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010665 ret = -EINVAL;
10666 break;
10667 }
10668 ppd->host_link_state = HLS_GOING_UP;
10669 break;
10670
10671 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10672 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10673 default:
10674 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010675 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010676 ret = -EINVAL;
10677 break;
10678 }
10679
Mike Marciniszyn77241052015-07-30 15:17:43 -040010680 goto done;
10681
10682unexpected:
10683 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010684 __func__, link_state_name(ppd->host_link_state),
10685 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010686 ret = -EINVAL;
10687
10688done:
10689 mutex_unlock(&ppd->hls_lock);
10690
10691 if (event.device)
10692 ib_dispatch_event(&event);
10693
10694 return ret;
10695}
10696
10697int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10698{
10699 u64 reg;
10700 int ret = 0;
10701
10702 switch (which) {
10703 case HFI1_IB_CFG_LIDLMC:
10704 set_lidlmc(ppd);
10705 break;
10706 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10707 /*
10708 * The VL Arbitrator high limit is sent in units of 4k
10709 * bytes, while HFI stores it in units of 64 bytes.
10710 */
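		/*
		 * Worked example (illustrative value only): a high limit of 2
		 * means 2 * 4 KB = 8 KB; in the 64-byte units the CSR expects
		 * that is 8192 / 64 = 128, i.e. val becomes 2 * 64 = 128.
		 */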
Jubin John8638b772016-02-14 20:19:24 -080010711 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010712 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10713 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10714 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10715 break;
10716 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10717 /* HFI only supports POLL as the default link down state */
10718 if (val != HLS_DN_POLL)
10719 ret = -EINVAL;
10720 break;
10721 case HFI1_IB_CFG_OP_VLS:
10722 if (ppd->vls_operational != val) {
10723 ppd->vls_operational = val;
10724 if (!ppd->port)
10725 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010726 }
10727 break;
10728 /*
10729 * For link width, link width downgrade, and speed enable, always AND
10730 * the setting with what is actually supported. This has two benefits.
10731 * First, enabled can't have unsupported values, no matter what the
10732 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10733 * "fill in with your supported value" have all the bits in the
10734 * field set, so simply ANDing with supported has the desired result.
10735 */
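	/*
	 * Illustrative example (hypothetical bit values): if
	 * link_width_supported were 0x3 and the FM wrote the ALL_SUPPORTED
	 * wildcard 0xFFFF, the enabled value would become 0xFFFF & 0x3 = 0x3;
	 * a request containing only unsupported bits, say 0x4, would end up
	 * as 0x4 & 0x3 = 0.
	 */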
10736 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10737 ppd->link_width_enabled = val & ppd->link_width_supported;
10738 break;
10739 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10740 ppd->link_width_downgrade_enabled =
10741 val & ppd->link_width_downgrade_supported;
10742 break;
10743 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10744 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10745 break;
10746 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10747 /*
10748 * HFI does not follow IB specs; save this value
10749 * so we can report it if asked.
10750 */
10751 ppd->overrun_threshold = val;
10752 break;
10753 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10754 /*
10755 * HFI does not follow IB specs; save this value
10756 * so we can report it if asked.
10757 */
10758 ppd->phy_error_threshold = val;
10759 break;
10760
10761 case HFI1_IB_CFG_MTU:
10762 set_send_length(ppd);
10763 break;
10764
10765 case HFI1_IB_CFG_PKEYS:
10766 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10767 set_partition_keys(ppd);
10768 break;
10769
10770 default:
10771 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10772 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010773 "%s: which %s, val 0x%x: not implemented\n",
10774 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010775 break;
10776 }
10777 return ret;
10778}
10779
10780/* begin functions related to vl arbitration table caching */
10781static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10782{
10783 int i;
10784
10785 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10786 VL_ARB_LOW_PRIO_TABLE_SIZE);
10787 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10788 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10789
10790 /*
10791 * Note that we always return values directly from the
10792 * 'vl_arb_cache' (and do no CSR reads) in response to a
10793 * 'Get(VLArbTable)'. This is obviously correct after a
10794 * 'Set(VLArbTable)', since the cache will then be up to
10795 * date. But it's also correct prior to any 'Set(VLArbTable)'
10796 * since then both the cache, and the relevant h/w registers
10797 * will be zeroed.
10798 */
10799
10800 for (i = 0; i < MAX_PRIO_TABLE; i++)
10801 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10802}
10803
10804/*
10805 * vl_arb_lock_cache
10806 *
10807 * All other vl_arb_* functions should be called only after locking
10808 * the cache.
10809 */
10810static inline struct vl_arb_cache *
10811vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10812{
10813 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10814 return NULL;
10815 spin_lock(&ppd->vl_arb_cache[idx].lock);
10816 return &ppd->vl_arb_cache[idx];
10817}
10818
10819static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10820{
10821 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10822}
10823
10824static void vl_arb_get_cache(struct vl_arb_cache *cache,
10825 struct ib_vl_weight_elem *vl)
10826{
10827 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10828}
10829
10830static void vl_arb_set_cache(struct vl_arb_cache *cache,
10831 struct ib_vl_weight_elem *vl)
10832{
10833 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10834}
10835
10836static int vl_arb_match_cache(struct vl_arb_cache *cache,
10837 struct ib_vl_weight_elem *vl)
10838{
10839 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10840}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010841
Mike Marciniszyn77241052015-07-30 15:17:43 -040010842/* end functions related to vl arbitration table caching */
10843
10844static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10845 u32 size, struct ib_vl_weight_elem *vl)
10846{
10847 struct hfi1_devdata *dd = ppd->dd;
10848 u64 reg;
10849 unsigned int i, is_up = 0;
10850 int drain, ret = 0;
10851
10852 mutex_lock(&ppd->hls_lock);
10853
10854 if (ppd->host_link_state & HLS_UP)
10855 is_up = 1;
10856
10857 drain = !is_ax(dd) && is_up;
10858
10859 if (drain)
10860 /*
10861 * Before adjusting VL arbitration weights, empty per-VL
10862 * FIFOs, otherwise a packet whose VL weight is being
10863 * set to 0 could get stuck in a FIFO with no chance to
10864 * egress.
10865 */
10866 ret = stop_drain_data_vls(dd);
10867
10868 if (ret) {
10869 dd_dev_err(
10870 dd,
10871 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10872 __func__);
10873 goto err;
10874 }
10875
10876 for (i = 0; i < size; i++, vl++) {
10877 /*
10878 * NOTE: The low priority shift and mask are used here, but
10879 * they are the same for both the low and high registers.
10880 */
10881 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10882 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10883 | (((u64)vl->weight
10884 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10885 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10886 write_csr(dd, target + (i * 8), reg);
10887 }
10888 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10889
10890 if (drain)
10891 open_fill_data_vls(dd); /* reopen all VLs */
10892
10893err:
10894 mutex_unlock(&ppd->hls_lock);
10895
10896 return ret;
10897}
10898
10899/*
10900 * Read one credit merge VL register.
10901 */
10902static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10903 struct vl_limit *vll)
10904{
10905 u64 reg = read_csr(dd, csr);
10906
10907 vll->dedicated = cpu_to_be16(
10908 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10909 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10910 vll->shared = cpu_to_be16(
10911 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10912 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10913}
10914
10915/*
10916 * Read the current credit merge limits.
10917 */
10918static int get_buffer_control(struct hfi1_devdata *dd,
10919 struct buffer_control *bc, u16 *overall_limit)
10920{
10921 u64 reg;
10922 int i;
10923
10924 /* not all entries are filled in */
10925 memset(bc, 0, sizeof(*bc));
10926
10927 /* OPA and HFI have a 1-1 mapping */
10928 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010929 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010930
10931 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10932 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10933
10934 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10935 bc->overall_shared_limit = cpu_to_be16(
10936 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10937 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10938 if (overall_limit)
10939 *overall_limit = (reg
10940 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10941 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10942 return sizeof(struct buffer_control);
10943}
10944
10945static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10946{
10947 u64 reg;
10948 int i;
10949
10950 /* each register contains 16 SC->VLnt mappings, 4 bits each */
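	/*
	 * For example, byte 0 of the register carries SC0 in its low nibble
	 * and SC1 in its high nibble, which is what the unpacking below
	 * assumes.
	 */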
10951 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10952 for (i = 0; i < sizeof(u64); i++) {
10953 u8 byte = *(((u8 *)&reg) + i);
10954
10955 dp->vlnt[2 * i] = byte & 0xf;
10956 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10957 }
10958
10959 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10960 for (i = 0; i < sizeof(u64); i++) {
10961 u8 byte = *(((u8 *)&reg) + i);
10962
10963 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10964 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10965 }
10966 return sizeof(struct sc2vlnt);
10967}
10968
10969static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10970 struct ib_vl_weight_elem *vl)
10971{
10972 unsigned int i;
10973
10974 for (i = 0; i < nelems; i++, vl++) {
10975 vl->vl = 0xf;
10976 vl->weight = 0;
10977 }
10978}
10979
10980static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10981{
10982 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010983 DC_SC_VL_VAL(15_0,
10984 0, dp->vlnt[0] & 0xf,
10985 1, dp->vlnt[1] & 0xf,
10986 2, dp->vlnt[2] & 0xf,
10987 3, dp->vlnt[3] & 0xf,
10988 4, dp->vlnt[4] & 0xf,
10989 5, dp->vlnt[5] & 0xf,
10990 6, dp->vlnt[6] & 0xf,
10991 7, dp->vlnt[7] & 0xf,
10992 8, dp->vlnt[8] & 0xf,
10993 9, dp->vlnt[9] & 0xf,
10994 10, dp->vlnt[10] & 0xf,
10995 11, dp->vlnt[11] & 0xf,
10996 12, dp->vlnt[12] & 0xf,
10997 13, dp->vlnt[13] & 0xf,
10998 14, dp->vlnt[14] & 0xf,
10999 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011000 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080011001 DC_SC_VL_VAL(31_16,
11002 16, dp->vlnt[16] & 0xf,
11003 17, dp->vlnt[17] & 0xf,
11004 18, dp->vlnt[18] & 0xf,
11005 19, dp->vlnt[19] & 0xf,
11006 20, dp->vlnt[20] & 0xf,
11007 21, dp->vlnt[21] & 0xf,
11008 22, dp->vlnt[22] & 0xf,
11009 23, dp->vlnt[23] & 0xf,
11010 24, dp->vlnt[24] & 0xf,
11011 25, dp->vlnt[25] & 0xf,
11012 26, dp->vlnt[26] & 0xf,
11013 27, dp->vlnt[27] & 0xf,
11014 28, dp->vlnt[28] & 0xf,
11015 29, dp->vlnt[29] & 0xf,
11016 30, dp->vlnt[30] & 0xf,
11017 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011018}
11019
11020static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11021 u16 limit)
11022{
11023 if (limit != 0)
11024 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011025 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011026}
11027
11028 /* change only the shared limit portion of SendCmGlobalCredit */
11029static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11030{
11031 u64 reg;
11032
11033 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11034 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11035 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11036 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11037}
11038
11039 /* change only the total credit limit portion of SendCmGlobalCredit */
11040static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11041{
11042 u64 reg;
11043
11044 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11045 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11046 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11047 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11048}
11049
11050/* set the given per-VL shared limit */
11051static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11052{
11053 u64 reg;
11054 u32 addr;
11055
11056 if (vl < TXE_NUM_DATA_VL)
11057 addr = SEND_CM_CREDIT_VL + (8 * vl);
11058 else
11059 addr = SEND_CM_CREDIT_VL15;
11060
11061 reg = read_csr(dd, addr);
11062 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11063 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11064 write_csr(dd, addr, reg);
11065}
11066
11067/* set the given per-VL dedicated limit */
11068static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11069{
11070 u64 reg;
11071 u32 addr;
11072
11073 if (vl < TXE_NUM_DATA_VL)
11074 addr = SEND_CM_CREDIT_VL + (8 * vl);
11075 else
11076 addr = SEND_CM_CREDIT_VL15;
11077
11078 reg = read_csr(dd, addr);
11079 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11080 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11081 write_csr(dd, addr, reg);
11082}
11083
11084/* spin until the given per-VL status mask bits clear */
11085static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11086 const char *which)
11087{
11088 unsigned long timeout;
11089 u64 reg;
11090
11091 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11092 while (1) {
11093 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11094
11095 if (reg == 0)
11096 return; /* success */
11097 if (time_after(jiffies, timeout))
11098 break; /* timed out */
11099 udelay(1);
11100 }
11101
11102 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011103 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11104 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011105 /*
11106 * If this occurs, it is likely there was a credit loss on the link.
11107 * The only recovery from that is a link bounce.
11108 */
11109 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011110 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011111}
11112
11113/*
11114 * The number of credits on the VLs may be changed while everything
11115 * is "live", but the following algorithm must be followed due to
11116 * how the hardware is actually implemented. In particular,
11117 * Return_Credit_Status[] is the only correct status check.
11118 *
11119 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11120 * set Global_Shared_Credit_Limit = 0
11121 * use_all_vl = 1
11122 * mask0 = all VLs that are changing either dedicated or shared limits
11123 * set Shared_Limit[mask0] = 0
11124 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11125 * if (changing any dedicated limit)
11126 * mask1 = all VLs that are lowering dedicated limits
11127 * lower Dedicated_Limit[mask1]
11128 * spin until Return_Credit_Status[mask1] == 0
11129 * raise Dedicated_Limits
11130 * raise Shared_Limits
11131 * raise Global_Shared_Credit_Limit
11132 *
11133 * lower = if the new limit is lower, set the limit to the new value
11134 * raise = if the new limit is higher than the current value (may be changed
11135 * earlier in the algorithm), set the limit to the new value
11136 */
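/*
 * Illustrative walk-through (hypothetical numbers): lowering VL0's dedicated
 * limit from 200 to 100 while raising VL1's from 100 to 200 leaves the total
 * unchanged, so neither global-limit bracket fires.  Both VLs are changing,
 * so both shared limits are zeroed and Return_Credit_Status is polled for
 * both; only VL0 is lowering, so its dedicated limit is written and waited
 * on first, after which VL1's dedicated limit and the shared limits are
 * raised back.
 */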
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011137int set_buffer_control(struct hfi1_pportdata *ppd,
11138 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011139{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011140 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011141 u64 changing_mask, ld_mask, stat_mask;
11142 int change_count;
11143 int i, use_all_mask;
11144 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011145 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011146 /*
11147 * A0: add the variable any_shared_limit_changing below and in the
11148 * algorithm above. If removing A0 support, it can be removed.
11149 */
11150 int any_shared_limit_changing;
11151 struct buffer_control cur_bc;
11152 u8 changing[OPA_MAX_VLS];
11153 u8 lowering_dedicated[OPA_MAX_VLS];
11154 u16 cur_total;
11155 u32 new_total = 0;
11156 const u64 all_mask =
11157 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11158 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11159 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11160 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11161 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11162 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11163 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11164 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11165 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11166
11167#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11168#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11169
Mike Marciniszyn77241052015-07-30 15:17:43 -040011170 /* find the new total credits, do sanity check on unused VLs */
11171 for (i = 0; i < OPA_MAX_VLS; i++) {
11172 if (valid_vl(i)) {
11173 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11174 continue;
11175 }
11176 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011177 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011178 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011179 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011180 new_bc->vl[i].dedicated = 0;
11181 new_bc->vl[i].shared = 0;
11182 }
11183 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011184
Mike Marciniszyn77241052015-07-30 15:17:43 -040011185 /* fetch the current values */
11186 get_buffer_control(dd, &cur_bc, &cur_total);
11187
11188 /*
11189 * Create the masks we will use.
11190 */
11191 memset(changing, 0, sizeof(changing));
11192 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011193 /*
11194 * NOTE: Assumes that the individual VL bits are adjacent and in
11195 * increasing order
11196 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011197 stat_mask =
11198 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11199 changing_mask = 0;
11200 ld_mask = 0;
11201 change_count = 0;
11202 any_shared_limit_changing = 0;
11203 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11204 if (!valid_vl(i))
11205 continue;
11206 this_shared_changing = new_bc->vl[i].shared
11207 != cur_bc.vl[i].shared;
11208 if (this_shared_changing)
11209 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011210 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11211 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011212 changing[i] = 1;
11213 changing_mask |= stat_mask;
11214 change_count++;
11215 }
11216 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11217 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11218 lowering_dedicated[i] = 1;
11219 ld_mask |= stat_mask;
11220 }
11221 }
11222
11223 /* bracket the credit change with a total adjustment */
11224 if (new_total > cur_total)
11225 set_global_limit(dd, new_total);
11226
11227 /*
11228 * Start the credit change algorithm.
11229 */
11230 use_all_mask = 0;
11231 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011232 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11233 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011234 set_global_shared(dd, 0);
11235 cur_bc.overall_shared_limit = 0;
11236 use_all_mask = 1;
11237 }
11238
11239 for (i = 0; i < NUM_USABLE_VLS; i++) {
11240 if (!valid_vl(i))
11241 continue;
11242
11243 if (changing[i]) {
11244 set_vl_shared(dd, i, 0);
11245 cur_bc.vl[i].shared = 0;
11246 }
11247 }
11248
11249 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011250 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011251
11252 if (change_count > 0) {
11253 for (i = 0; i < NUM_USABLE_VLS; i++) {
11254 if (!valid_vl(i))
11255 continue;
11256
11257 if (lowering_dedicated[i]) {
11258 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011259 be16_to_cpu(new_bc->
11260 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011261 cur_bc.vl[i].dedicated =
11262 new_bc->vl[i].dedicated;
11263 }
11264 }
11265
11266 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11267
11268 /* now raise all dedicated that are going up */
11269 for (i = 0; i < NUM_USABLE_VLS; i++) {
11270 if (!valid_vl(i))
11271 continue;
11272
11273 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11274 be16_to_cpu(cur_bc.vl[i].dedicated))
11275 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011276 be16_to_cpu(new_bc->
11277 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011278 }
11279 }
11280
11281 /* next raise all shared that are going up */
11282 for (i = 0; i < NUM_USABLE_VLS; i++) {
11283 if (!valid_vl(i))
11284 continue;
11285
11286 if (be16_to_cpu(new_bc->vl[i].shared) >
11287 be16_to_cpu(cur_bc.vl[i].shared))
11288 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11289 }
11290
11291 /* finally raise the global shared */
11292 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011293 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011294 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011295 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011296
11297 /* bracket the credit change with a total adjustment */
11298 if (new_total < cur_total)
11299 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011300
11301 /*
11302 * Determine the actual number of operational VLS using the number of
11303 * dedicated and shared credits for each VL.
11304 */
11305 if (change_count > 0) {
11306 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11307 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11308 be16_to_cpu(new_bc->vl[i].shared) > 0)
11309 vl_count++;
11310 ppd->actual_vls_operational = vl_count;
11311 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11312 ppd->actual_vls_operational :
11313 ppd->vls_operational,
11314 NULL);
11315 if (ret == 0)
11316 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11317 ppd->actual_vls_operational :
11318 ppd->vls_operational, NULL);
11319 if (ret)
11320 return ret;
11321 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011322 return 0;
11323}
11324
11325/*
11326 * Read the given fabric manager table. Return the size of the
11327 * table (in bytes) on success, and a negative error code on
11328 * failure.
11329 */
11330int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11331
11332{
11333 int size;
11334 struct vl_arb_cache *vlc;
11335
11336 switch (which) {
11337 case FM_TBL_VL_HIGH_ARB:
11338 size = 256;
11339 /*
11340 * OPA specifies 128 elements (of 2 bytes each), though
11341 * HFI supports only 16 elements in h/w.
11342 */
11343 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11344 vl_arb_get_cache(vlc, t);
11345 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11346 break;
11347 case FM_TBL_VL_LOW_ARB:
11348 size = 256;
11349 /*
11350 * OPA specifies 128 elements (of 2 bytes each), though
11351 * HFI supports only 16 elements in h/w.
11352 */
11353 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11354 vl_arb_get_cache(vlc, t);
11355 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11356 break;
11357 case FM_TBL_BUFFER_CONTROL:
11358 size = get_buffer_control(ppd->dd, t, NULL);
11359 break;
11360 case FM_TBL_SC2VLNT:
11361 size = get_sc2vlnt(ppd->dd, t);
11362 break;
11363 case FM_TBL_VL_PREEMPT_ELEMS:
11364 size = 256;
11365 /* OPA specifies 128 elements, of 2 bytes each */
11366 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11367 break;
11368 case FM_TBL_VL_PREEMPT_MATRIX:
11369 size = 256;
11370 /*
11371 * OPA specifies that this is the same size as the VL
11372 * arbitration tables (i.e., 256 bytes).
11373 */
11374 break;
11375 default:
11376 return -EINVAL;
11377 }
11378 return size;
11379}
11380
11381/*
11382 * Write the given fabric manager table.
11383 */
11384int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11385{
11386 int ret = 0;
11387 struct vl_arb_cache *vlc;
11388
11389 switch (which) {
11390 case FM_TBL_VL_HIGH_ARB:
11391 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11392 if (vl_arb_match_cache(vlc, t)) {
11393 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11394 break;
11395 }
11396 vl_arb_set_cache(vlc, t);
11397 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11398 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11399 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11400 break;
11401 case FM_TBL_VL_LOW_ARB:
11402 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11403 if (vl_arb_match_cache(vlc, t)) {
11404 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11405 break;
11406 }
11407 vl_arb_set_cache(vlc, t);
11408 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11409 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11410 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11411 break;
11412 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011413 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011414 break;
11415 case FM_TBL_SC2VLNT:
11416 set_sc2vlnt(ppd->dd, t);
11417 break;
11418 default:
11419 ret = -EINVAL;
11420 }
11421 return ret;
11422}
11423
11424/*
11425 * Disable all data VLs.
11426 *
11427 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11428 */
11429static int disable_data_vls(struct hfi1_devdata *dd)
11430{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011431 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011432 return 1;
11433
11434 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11435
11436 return 0;
11437}
11438
11439/*
11440 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11441 * Just re-enables all data VLs (the "fill" part happens
11442 * automatically - the name was chosen for symmetry with
11443 * stop_drain_data_vls()).
11444 *
11445 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11446 */
11447int open_fill_data_vls(struct hfi1_devdata *dd)
11448{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011449 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011450 return 1;
11451
11452 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11453
11454 return 0;
11455}
11456
11457/*
11458 * drain_data_vls() - assumes that disable_data_vls() has been called;
11459 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
11460 * SDMA engines to drop to 0.
11461 */
11462static void drain_data_vls(struct hfi1_devdata *dd)
11463{
11464 sc_wait(dd);
11465 sdma_wait(dd);
11466 pause_for_credit_return(dd);
11467}
11468
11469/*
11470 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11471 *
11472 * Use open_fill_data_vls() to resume using data VLs. This pair is
11473 * meant to be used like this:
11474 *
11475 * stop_drain_data_vls(dd);
11476 * // do things with per-VL resources
11477 * open_fill_data_vls(dd);
11478 */
11479int stop_drain_data_vls(struct hfi1_devdata *dd)
11480{
11481 int ret;
11482
11483 ret = disable_data_vls(dd);
11484 if (ret == 0)
11485 drain_data_vls(dd);
11486
11487 return ret;
11488}
11489
11490/*
11491 * Convert a nanosecond time to a cclock count. No matter how slow
11492 * the cclock, a non-zero ns will always have a non-zero result.
11493 */
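/*
 * Illustrative example (assuming a hypothetical cclock period of 2000 ps,
 * not the real FPGA/ASIC values): 250 ns -> (250 * 1000) / 2000 = 125
 * cclocks, while 1 ns -> (1 * 1000) / 2000 = 0, which is then forced up to
 * 1 so a non-zero request never rounds to zero.
 */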
11494u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11495{
11496 u32 cclocks;
11497
11498 if (dd->icode == ICODE_FPGA_EMULATION)
11499 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11500 else /* simulation pretends to be ASIC */
11501 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11502 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11503 cclocks = 1;
11504 return cclocks;
11505}
11506
11507/*
11508 * Convert a cclock count to nanoseconds. No matter how slow
11509 * the cclock, a non-zero cclock count will always have a non-zero result.
11510 */
11511u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11512{
11513 u32 ns;
11514
11515 if (dd->icode == ICODE_FPGA_EMULATION)
11516 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11517 else /* simulation pretends to be ASIC */
11518 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11519 if (cclocks && !ns)
11520 ns = 1;
11521 return ns;
11522}
11523
11524/*
11525 * Dynamically adjust the receive interrupt timeout for a context based on
11526 * incoming packet rate.
11527 *
11528 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11529 */
11530static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11531{
11532 struct hfi1_devdata *dd = rcd->dd;
11533 u32 timeout = rcd->rcvavail_timeout;
11534
11535 /*
11536 * This algorithm doubles or halves the timeout depending on whether
11537 * the number of packets received in this interrupt was less than or
11538 * greater than or equal to the interrupt count.
11539 *
11540 * The calculations below do not allow a steady state to be achieved.
11541 * Only at the endpoints is it possible to have an unchanging
11542 * timeout.
11543 */
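	/*
	 * Illustrative example (hypothetical rcv_intr_count of 16): an
	 * interrupt that saw 5 packets halves the timeout (never going below
	 * 1), while one that saw 20 packets doubles it, capped at
	 * dd->rcv_intr_timeout_csr.
	 */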
11544 if (npkts < rcv_intr_count) {
11545 /*
11546 * Not enough packets arrived before the timeout, adjust
11547 * timeout downward.
11548 */
11549 if (timeout < 2) /* already at minimum? */
11550 return;
11551 timeout >>= 1;
11552 } else {
11553 /*
11554 * More than enough packets arrived before the timeout, adjust
11555 * timeout upward.
11556 */
11557 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11558 return;
11559 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11560 }
11561
11562 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011563 /*
11564 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11565 * been verified to be in range
11566 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011567 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011568 (u64)timeout <<
11569 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011570}
11571
11572void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11573 u32 intr_adjust, u32 npkts)
11574{
11575 struct hfi1_devdata *dd = rcd->dd;
11576 u64 reg;
11577 u32 ctxt = rcd->ctxt;
11578
11579 /*
11580 * Need to write timeout register before updating RcvHdrHead to ensure
11581 * that a new value is used when the HW decides to restart counting.
11582 */
11583 if (intr_adjust)
11584 adjust_rcv_timeout(rcd, npkts);
11585 if (updegr) {
11586 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11587 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11588 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11589 }
11590 mmiowb();
11591 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11592 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11593 << RCV_HDR_HEAD_HEAD_SHIFT);
11594 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11595 mmiowb();
11596}
11597
11598u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11599{
11600 u32 head, tail;
11601
11602 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11603 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11604
11605 if (rcd->rcvhdrtail_kvaddr)
11606 tail = get_rcvhdrtail(rcd);
11607 else
11608 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11609
11610 return head == tail;
11611}
11612
11613/*
11614 * Context Control and Receive Array encoding for buffer size:
11615 * 0x0 invalid
11616 * 0x1 4 KB
11617 * 0x2 8 KB
11618 * 0x3 16 KB
11619 * 0x4 32 KB
11620 * 0x5 64 KB
11621 * 0x6 128 KB
11622 * 0x7 256 KB
11623 * 0x8 512 KB (Receive Array only)
11624 * 0x9 1 MB (Receive Array only)
11625 * 0xa 2 MB (Receive Array only)
11626 *
11627 * 0xB-0xF - reserved (Receive Array only)
11628 *
11629 *
11630 * This routine assumes that the value has already been sanity checked.
11631 */
11632static u32 encoded_size(u32 size)
11633{
11634 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011635 case 4 * 1024: return 0x1;
11636 case 8 * 1024: return 0x2;
11637 case 16 * 1024: return 0x3;
11638 case 32 * 1024: return 0x4;
11639 case 64 * 1024: return 0x5;
11640 case 128 * 1024: return 0x6;
11641 case 256 * 1024: return 0x7;
11642 case 512 * 1024: return 0x8;
11643 case 1 * 1024 * 1024: return 0x9;
11644 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011645 }
11646 return 0x1; /* if invalid, go with the minimum size */
11647}
11648
11649void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11650{
11651 struct hfi1_ctxtdata *rcd;
11652 u64 rcvctrl, reg;
11653 int did_enable = 0;
11654
11655 rcd = dd->rcd[ctxt];
11656 if (!rcd)
11657 return;
11658
11659 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11660
11661 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11662 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011663 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11664 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011665 /* reset the tail and hdr addresses, and sequence count */
11666 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011667 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011668 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11669 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011670 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011671 rcd->seq_cnt = 1;
11672
11673 /* reset the cached receive header queue head value */
11674 rcd->head = 0;
11675
11676 /*
11677 * Zero the receive header queue so we don't get false
11678 * positives when checking the sequence number. The
11679 * sequence numbers could land exactly on the same spot.
11680 * E.g. a rcd restart before the receive header wrapped.
11681 */
11682 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11683
11684 /* starting timeout */
11685 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11686
11687 /* enable the context */
11688 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11689
11690 /* clean the egr buffer size first */
11691 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11692 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11693 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11694 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11695
11696 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11697 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11698 did_enable = 1;
11699
11700 /* zero RcvEgrIndexHead */
11701 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11702
11703 /* set eager count and base index */
11704 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11705 & RCV_EGR_CTRL_EGR_CNT_MASK)
11706 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11707 (((rcd->eager_base >> RCV_SHIFT)
11708 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11709 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11710 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11711
11712 /*
11713 * Set TID (expected) count and base index.
11714 * rcd->expected_count is set to individual RcvArray entries,
11715 * not pairs, and the CSR takes a pair-count in groups of
11716 * four, so divide by 8.
11717 */
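		/*
		 * Illustrative example (assuming RCV_SHIFT is 3, i.e. the
		 * shift divides by 8): an expected_count of 2048 RcvArray
		 * entries is 1024 pairs, or 256 groups of four pairs, so the
		 * CSR field gets 2048 >> 3 = 256.
		 */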
11718 reg = (((rcd->expected_count >> RCV_SHIFT)
11719 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11720 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11721 (((rcd->expected_base >> RCV_SHIFT)
11722 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11723 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11724 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011725 if (ctxt == HFI1_CTRL_CTXT)
11726 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011727 }
11728 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11729 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011730 /*
11731 * When receive context is being disabled turn on tail
11732 * update with a dummy tail address and then disable
11733 * receive context.
11734 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011735 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011736 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011737 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011738 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011739 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11740 }
11741
Mike Marciniszyn77241052015-07-30 15:17:43 -040011742 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11743 }
11744 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11745 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11746 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11747 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011748 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011749 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011750 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11751 /* See comment on RcvCtxtCtrl.TailUpd above */
11752 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11753 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11754 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011755 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11756 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11757 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11758 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11759 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011760 /*
11761 * In one-packet-per-eager mode, the size comes from
11762 * the RcvArray entry.
11763 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011764 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11765 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11766 }
11767 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11768 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11769 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11770 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11771 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11772 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11773 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11774 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11775 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11776 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11777 rcd->rcvctrl = rcvctrl;
11778 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11779 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11780
11781 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011782 if (did_enable &&
11783 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011784 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11785 if (reg != 0) {
11786 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011787 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011788 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11789 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11790 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11791 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11792 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11793 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011794 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011795 }
11796 }
11797
11798 if (did_enable) {
11799 /*
11800 * The interrupt timeout and count must be set after
11801 * the context is enabled to take effect.
11802 */
11803 /* set interrupt timeout */
11804 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011805 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011806 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11807
11808 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11809 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11810 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11811 }
11812
11813 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11814 /*
11815 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011816 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11817 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011818 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011819 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011820 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011821}
11822
Dean Luick582e05c2016-02-18 11:13:01 -080011823u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011824{
11825 int ret;
11826 u64 val = 0;
11827
11828 if (namep) {
11829 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011830 *namep = dd->cntrnames;
11831 } else {
11832 const struct cntr_entry *entry;
11833 int i, j;
11834
11835 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011836
11837 /* Get the start of the block of counters */
11838 *cntrp = dd->cntrs;
11839
11840 /*
11841 * Now go and fill in each counter in the block.
11842 */
11843 for (i = 0; i < DEV_CNTR_LAST; i++) {
11844 entry = &dev_cntrs[i];
11845 hfi1_cdbg(CNTR, "reading %s", entry->name);
11846 if (entry->flags & CNTR_DISABLED) {
11847 /* Nothing */
11848 hfi1_cdbg(CNTR, "\tDisabled\n");
11849 } else {
11850 if (entry->flags & CNTR_VL) {
11851 hfi1_cdbg(CNTR, "\tPer VL\n");
11852 for (j = 0; j < C_VL_COUNT; j++) {
11853 val = entry->rw_cntr(entry,
11854 dd, j,
11855 CNTR_MODE_R,
11856 0);
11857 hfi1_cdbg(
11858 CNTR,
11859 "\t\tRead 0x%llx for %d\n",
11860 val, j);
11861 dd->cntrs[entry->offset + j] =
11862 val;
11863 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011864 } else if (entry->flags & CNTR_SDMA) {
11865 hfi1_cdbg(CNTR,
11866 "\t Per SDMA Engine\n");
11867 for (j = 0; j < dd->chip_sdma_engines;
11868 j++) {
11869 val =
11870 entry->rw_cntr(entry, dd, j,
11871 CNTR_MODE_R, 0);
11872 hfi1_cdbg(CNTR,
11873 "\t\tRead 0x%llx for %d\n",
11874 val, j);
11875 dd->cntrs[entry->offset + j] =
11876 val;
11877 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011878 } else {
11879 val = entry->rw_cntr(entry, dd,
11880 CNTR_INVALID_VL,
11881 CNTR_MODE_R, 0);
11882 dd->cntrs[entry->offset] = val;
11883 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11884 }
11885 }
11886 }
11887 }
11888 return ret;
11889}
11890
11891/*
11892 * Used by sysfs to create files for hfi stats to read
11893 */
Dean Luick582e05c2016-02-18 11:13:01 -080011894u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011895{
11896 int ret;
11897 u64 val = 0;
11898
11899 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011900 ret = ppd->dd->portcntrnameslen;
11901 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011902 } else {
11903 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011904 int i, j;
11905
Dean Luick582e05c2016-02-18 11:13:01 -080011906 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011907 *cntrp = ppd->cntrs;
11908
11909 for (i = 0; i < PORT_CNTR_LAST; i++) {
11910 entry = &port_cntrs[i];
11911 hfi1_cdbg(CNTR, "reading %s", entry->name);
11912 if (entry->flags & CNTR_DISABLED) {
11913 /* Nothing */
11914 hfi1_cdbg(CNTR, "\tDisabled\n");
11915 continue;
11916 }
11917
11918 if (entry->flags & CNTR_VL) {
11919 hfi1_cdbg(CNTR, "\tPer VL");
11920 for (j = 0; j < C_VL_COUNT; j++) {
11921 val = entry->rw_cntr(entry, ppd, j,
11922 CNTR_MODE_R,
11923 0);
11924 hfi1_cdbg(
11925 CNTR,
11926 "\t\tRead 0x%llx for %d",
11927 val, j);
11928 ppd->cntrs[entry->offset + j] = val;
11929 }
11930 } else {
11931 val = entry->rw_cntr(entry, ppd,
11932 CNTR_INVALID_VL,
11933 CNTR_MODE_R,
11934 0);
11935 ppd->cntrs[entry->offset] = val;
11936 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11937 }
11938 }
11939 }
11940 return ret;
11941}
11942
11943static void free_cntrs(struct hfi1_devdata *dd)
11944{
11945 struct hfi1_pportdata *ppd;
11946 int i;
11947
11948 if (dd->synth_stats_timer.data)
11949 del_timer_sync(&dd->synth_stats_timer);
11950 dd->synth_stats_timer.data = 0;
11951 ppd = (struct hfi1_pportdata *)(dd + 1);
11952 for (i = 0; i < dd->num_pports; i++, ppd++) {
11953 kfree(ppd->cntrs);
11954 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011955 free_percpu(ppd->ibport_data.rvp.rc_acks);
11956 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11957 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011958 ppd->cntrs = NULL;
11959 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011960 ppd->ibport_data.rvp.rc_acks = NULL;
11961 ppd->ibport_data.rvp.rc_qacks = NULL;
11962 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011963 }
11964 kfree(dd->portcntrnames);
11965 dd->portcntrnames = NULL;
11966 kfree(dd->cntrs);
11967 dd->cntrs = NULL;
11968 kfree(dd->scntrs);
11969 dd->scntrs = NULL;
11970 kfree(dd->cntrnames);
11971 dd->cntrnames = NULL;
11972}
11973
Mike Marciniszyn77241052015-07-30 15:17:43 -040011974static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11975 u64 *psval, void *context, int vl)
11976{
11977 u64 val;
11978 u64 sval = *psval;
11979
11980 if (entry->flags & CNTR_DISABLED) {
11981 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11982 return 0;
11983 }
11984
11985 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11986
11987 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11988
11989 /* If its a synthetic counter there is more work we need to do */
11990 if (entry->flags & CNTR_SYNTH) {
11991 if (sval == CNTR_MAX) {
11992 /* No need to read already saturated */
11993 return CNTR_MAX;
11994 }
11995
11996 if (entry->flags & CNTR_32BIT) {
11997 /* 32bit counters can wrap multiple times */
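			/*
			 * Illustrative example: if the saved sval is
			 * 0x1FFFFFF00 (upper = 1, lower = 0xFFFFFF00) and the
			 * hardware now reads back 0x10, then lower > val means
			 * the 32-bit counter wrapped, so upper becomes 2 and
			 * the synthesized 64-bit value is 0x200000010.
			 */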
11998 u64 upper = sval >> 32;
11999 u64 lower = (sval << 32) >> 32;
12000
12001 if (lower > val) { /* hw wrapped */
12002 if (upper == CNTR_32BIT_MAX)
12003 val = CNTR_MAX;
12004 else
12005 upper++;
12006 }
12007
12008 if (val != CNTR_MAX)
12009 val = (upper << 32) | val;
12010
12011 } else {
12012 /* If we rolled we are saturated */
12013 if ((val < sval) || (val > CNTR_MAX))
12014 val = CNTR_MAX;
12015 }
12016 }
12017
12018 *psval = val;
12019
12020 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12021
12022 return val;
12023}
12024
12025static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12026 struct cntr_entry *entry,
12027 u64 *psval, void *context, int vl, u64 data)
12028{
12029 u64 val;
12030
12031 if (entry->flags & CNTR_DISABLED) {
12032 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12033 return 0;
12034 }
12035
12036 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12037
12038 if (entry->flags & CNTR_SYNTH) {
12039 *psval = data;
12040 if (entry->flags & CNTR_32BIT) {
12041 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12042 (data << 32) >> 32);
12043 val = data; /* return the full 64bit value */
12044 } else {
12045 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12046 data);
12047 }
12048 } else {
12049 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12050 }
12051
12052 *psval = val;
12053
12054 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12055
12056 return val;
12057}
12058
12059u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12060{
12061 struct cntr_entry *entry;
12062 u64 *sval;
12063
12064 entry = &dev_cntrs[index];
12065 sval = dd->scntrs + entry->offset;
12066
12067 if (vl != CNTR_INVALID_VL)
12068 sval += vl;
12069
12070 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12071}
12072
12073u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12074{
12075 struct cntr_entry *entry;
12076 u64 *sval;
12077
12078 entry = &dev_cntrs[index];
12079 sval = dd->scntrs + entry->offset;
12080
12081 if (vl != CNTR_INVALID_VL)
12082 sval += vl;
12083
12084 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12085}
12086
12087u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12088{
12089 struct cntr_entry *entry;
12090 u64 *sval;
12091
12092 entry = &port_cntrs[index];
12093 sval = ppd->scntrs + entry->offset;
12094
12095 if (vl != CNTR_INVALID_VL)
12096 sval += vl;
12097
12098 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12099 (index <= C_RCV_HDR_OVF_LAST)) {
12100 /* We do not want to bother for disabled contexts */
12101 return 0;
12102 }
12103
12104 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12105}
12106
12107u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12108{
12109 struct cntr_entry *entry;
12110 u64 *sval;
12111
12112 entry = &port_cntrs[index];
12113 sval = ppd->scntrs + entry->offset;
12114
12115 if (vl != CNTR_INVALID_VL)
12116 sval += vl;
12117
12118 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12119 (index <= C_RCV_HDR_OVF_LAST)) {
12120 /* We do not want to bother for disabled contexts */
12121 return 0;
12122 }
12123
12124 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12125}
12126
12127static void update_synth_timer(unsigned long opaque)
12128{
12129 u64 cur_tx;
12130 u64 cur_rx;
12131 u64 total_flits;
12132 u8 update = 0;
12133 int i, j, vl;
12134 struct hfi1_pportdata *ppd;
12135 struct cntr_entry *entry;
12136
12137 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12138
12139 /*
12140 * Rather than keep beating on the CSRs, pick a minimal set that we can
12141 * check to watch for potential rollover. We can do this by looking at
12142 * the number of flits sent/received. If the total flits exceed 32 bits,
12143 * then we have to iterate over all the counters and update them.
12144 */
12145 entry = &dev_cntrs[C_DC_RCV_FLITS];
12146 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12147
12148 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12149 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12150
12151 hfi1_cdbg(
12152 CNTR,
12153 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12154 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12155
12156 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12157 /*
12158 * May not be strictly necessary to update but it won't hurt and
12159 * simplifies the logic here.
12160 */
12161 update = 1;
12162 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12163 dd->unit);
12164 } else {
12165 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12166 hfi1_cdbg(CNTR,
12167 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12168 total_flits, (u64)CNTR_32BIT_MAX);
12169 if (total_flits >= CNTR_32BIT_MAX) {
12170 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12171 dd->unit);
12172 update = 1;
12173 }
12174 }
12175
12176 if (update) {
12177 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12178 for (i = 0; i < DEV_CNTR_LAST; i++) {
12179 entry = &dev_cntrs[i];
12180 if (entry->flags & CNTR_VL) {
12181 for (vl = 0; vl < C_VL_COUNT; vl++)
12182 read_dev_cntr(dd, i, vl);
12183 } else {
12184 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12185 }
12186 }
12187 ppd = (struct hfi1_pportdata *)(dd + 1);
12188 for (i = 0; i < dd->num_pports; i++, ppd++) {
12189 for (j = 0; j < PORT_CNTR_LAST; j++) {
12190 entry = &port_cntrs[j];
12191 if (entry->flags & CNTR_VL) {
12192 for (vl = 0; vl < C_VL_COUNT; vl++)
12193 read_port_cntr(ppd, j, vl);
12194 } else {
12195 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12196 }
12197 }
12198 }
12199
12200 /*
12201 * We want the value in the register. The goal is to keep track
12202 * of the number of "ticks" not the counter value. In other
12203 * words if the register rolls we want to notice it and go ahead
12204 * and force an update.
12205 */
12206 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12207 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12208 CNTR_MODE_R, 0);
12209
12210 entry = &dev_cntrs[C_DC_RCV_FLITS];
12211 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12212 CNTR_MODE_R, 0);
12213
12214 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12215 dd->unit, dd->last_tx, dd->last_rx);
12216
12217 } else {
12218 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12219 }
12220
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012221 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012222}
12223
Jianxin Xiong09a79082016-10-25 13:12:40 -070012224#define C_MAX_NAME 16 /* 15 chars + one for /0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012225static int init_cntrs(struct hfi1_devdata *dd)
12226{
Dean Luickc024c552016-01-11 18:30:57 -050012227 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012228 size_t sz;
12229 char *p;
12230 char name[C_MAX_NAME];
12231 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012232 const char *bit_type_32 = ",32";
12233 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012234
12235 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012236 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12237 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012238
12239 /***********************/
12240 /* per device counters */
12241 /***********************/
12242
12243	/* size names and determine how many we have */
12244 dd->ndevcntrs = 0;
12245 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012246
12247 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012248 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12249 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12250 continue;
12251 }
12252
12253 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012254 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012255 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012256 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012257 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012258 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012259 /* Add ",32" for 32-bit counters */
12260 if (dev_cntrs[i].flags & CNTR_32BIT)
12261 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012262 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012263 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012264 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012265 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012266 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012267 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012268 snprintf(name, C_MAX_NAME, "%s%d",
12269 dev_cntrs[i].name, j);
12270 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012271 /* Add ",32" for 32-bit counters */
12272 if (dev_cntrs[i].flags & CNTR_32BIT)
12273 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012274 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012275 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012276 }
12277 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012278 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012279 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012280 /* Add ",32" for 32-bit counters */
12281 if (dev_cntrs[i].flags & CNTR_32BIT)
12282 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012283 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012284 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012285 }
12286 }
12287
12288 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012289 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012290 if (!dd->cntrs)
12291 goto bail;
12292
Dean Luickc024c552016-01-11 18:30:57 -050012293 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012294 if (!dd->scntrs)
12295 goto bail;
12296
Mike Marciniszyn77241052015-07-30 15:17:43 -040012297 /* allocate space for the counter names */
12298 dd->cntrnameslen = sz;
12299 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12300 if (!dd->cntrnames)
12301 goto bail;
12302
12303 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012304 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012305 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12306 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012307 } else if (dev_cntrs[i].flags & CNTR_VL) {
12308 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012309 snprintf(name, C_MAX_NAME, "%s%d",
12310 dev_cntrs[i].name,
12311 vl_from_idx(j));
12312 memcpy(p, name, strlen(name));
12313 p += strlen(name);
12314
12315 /* Counter is 32 bits */
12316 if (dev_cntrs[i].flags & CNTR_32BIT) {
12317 memcpy(p, bit_type_32, bit_type_32_sz);
12318 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012319 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012320
Mike Marciniszyn77241052015-07-30 15:17:43 -040012321 *p++ = '\n';
12322 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012323 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12324 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012325 snprintf(name, C_MAX_NAME, "%s%d",
12326 dev_cntrs[i].name, j);
12327 memcpy(p, name, strlen(name));
12328 p += strlen(name);
12329
12330 /* Counter is 32 bits */
12331 if (dev_cntrs[i].flags & CNTR_32BIT) {
12332 memcpy(p, bit_type_32, bit_type_32_sz);
12333 p += bit_type_32_sz;
12334 }
12335
12336 *p++ = '\n';
12337 }
12338 } else {
12339 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12340 p += strlen(dev_cntrs[i].name);
12341
12342 /* Counter is 32 bits */
12343 if (dev_cntrs[i].flags & CNTR_32BIT) {
12344 memcpy(p, bit_type_32, bit_type_32_sz);
12345 p += bit_type_32_sz;
12346 }
12347
12348 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012349 }
12350 }
12351
12352 /*********************/
12353 /* per port counters */
12354 /*********************/
12355
12356 /*
12357 * Go through the counters for the overflows and disable the ones we
12358 * don't need. This varies based on platform so we need to do it
12359 * dynamically here.
12360 */
12361 rcv_ctxts = dd->num_rcv_contexts;
12362 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12363 i <= C_RCV_HDR_OVF_LAST; i++) {
12364 port_cntrs[i].flags |= CNTR_DISABLED;
12365 }
12366
12367	/* size port counter names and determine how many we have */
12368 sz = 0;
12369 dd->nportcntrs = 0;
12370 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012371 if (port_cntrs[i].flags & CNTR_DISABLED) {
12372 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12373 continue;
12374 }
12375
12376 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012377 port_cntrs[i].offset = dd->nportcntrs;
12378 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012379 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012380 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012381 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012382 /* Add ",32" for 32-bit counters */
12383 if (port_cntrs[i].flags & CNTR_32BIT)
12384 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012385 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012386 dd->nportcntrs++;
12387 }
12388 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012389 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012390 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012391 /* Add ",32" for 32-bit counters */
12392 if (port_cntrs[i].flags & CNTR_32BIT)
12393 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012394 port_cntrs[i].offset = dd->nportcntrs;
12395 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012396 }
12397 }
12398
12399 /* allocate space for the counter names */
12400 dd->portcntrnameslen = sz;
12401 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12402 if (!dd->portcntrnames)
12403 goto bail;
12404
12405 /* fill in port cntr names */
12406 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12407 if (port_cntrs[i].flags & CNTR_DISABLED)
12408 continue;
12409
12410 if (port_cntrs[i].flags & CNTR_VL) {
12411 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012412 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012413 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012414 memcpy(p, name, strlen(name));
12415 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012416
12417 /* Counter is 32 bits */
12418 if (port_cntrs[i].flags & CNTR_32BIT) {
12419 memcpy(p, bit_type_32, bit_type_32_sz);
12420 p += bit_type_32_sz;
12421 }
12422
Mike Marciniszyn77241052015-07-30 15:17:43 -040012423 *p++ = '\n';
12424 }
12425 } else {
12426 memcpy(p, port_cntrs[i].name,
12427 strlen(port_cntrs[i].name));
12428 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012429
12430 /* Counter is 32 bits */
12431 if (port_cntrs[i].flags & CNTR_32BIT) {
12432 memcpy(p, bit_type_32, bit_type_32_sz);
12433 p += bit_type_32_sz;
12434 }
12435
Mike Marciniszyn77241052015-07-30 15:17:43 -040012436 *p++ = '\n';
12437 }
12438 }
12439
12440 /* allocate per port storage for counter values */
12441 ppd = (struct hfi1_pportdata *)(dd + 1);
12442 for (i = 0; i < dd->num_pports; i++, ppd++) {
12443 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12444 if (!ppd->cntrs)
12445 goto bail;
12446
12447 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12448 if (!ppd->scntrs)
12449 goto bail;
12450 }
12451
12452 /* CPU counters need to be allocated and zeroed */
12453 if (init_cpu_counters(dd))
12454 goto bail;
12455
12456 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12457 return 0;
12458bail:
12459 free_cntrs(dd);
12460 return -ENOMEM;
12461}
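
/*
 * Rough sketch of the flattened name buffer built above (the counter
 * names here are examples only).  A per-VL counter expands to one
 * newline-terminated entry per VL, and 32-bit counters carry a ",32"
 * suffix so readers know the hardware width:
 *
 *	"ExampleCntVL0,32\nExampleCntVL1,32\n...\nExampleCnt64\n"
 *
 * The sizing pass and the fill pass must agree exactly on these rules,
 * which is why both walk dev_cntrs[]/port_cntrs[] with the same tests.
 */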
12462
Mike Marciniszyn77241052015-07-30 15:17:43 -040012463static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12464{
12465 switch (chip_lstate) {
12466 default:
12467 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012468 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12469 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012470 /* fall through */
12471 case LSTATE_DOWN:
12472 return IB_PORT_DOWN;
12473 case LSTATE_INIT:
12474 return IB_PORT_INIT;
12475 case LSTATE_ARMED:
12476 return IB_PORT_ARMED;
12477 case LSTATE_ACTIVE:
12478 return IB_PORT_ACTIVE;
12479 }
12480}
12481
12482u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12483{
12484 /* look at the HFI meta-states only */
12485 switch (chip_pstate & 0xf0) {
12486 default:
12487 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012488 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012489 /* fall through */
12490 case PLS_DISABLED:
12491 return IB_PORTPHYSSTATE_DISABLED;
12492 case PLS_OFFLINE:
12493 return OPA_PORTPHYSSTATE_OFFLINE;
12494 case PLS_POLLING:
12495 return IB_PORTPHYSSTATE_POLLING;
12496 case PLS_CONFIGPHY:
12497 return IB_PORTPHYSSTATE_TRAINING;
12498 case PLS_LINKUP:
12499 return IB_PORTPHYSSTATE_LINKUP;
12500 case PLS_PHYTEST:
12501 return IB_PORTPHYSSTATE_PHY_TEST;
12502 }
12503}
12504
12505/* return the OPA port logical state name */
12506const char *opa_lstate_name(u32 lstate)
12507{
12508 static const char * const port_logical_names[] = {
12509 "PORT_NOP",
12510 "PORT_DOWN",
12511 "PORT_INIT",
12512 "PORT_ARMED",
12513 "PORT_ACTIVE",
12514 "PORT_ACTIVE_DEFER",
12515 };
12516 if (lstate < ARRAY_SIZE(port_logical_names))
12517 return port_logical_names[lstate];
12518 return "unknown";
12519}
12520
12521/* return the OPA port physical state name */
12522const char *opa_pstate_name(u32 pstate)
12523{
12524 static const char * const port_physical_names[] = {
12525 "PHYS_NOP",
12526 "reserved1",
12527 "PHYS_POLL",
12528 "PHYS_DISABLED",
12529 "PHYS_TRAINING",
12530 "PHYS_LINKUP",
12531 "PHYS_LINK_ERR_RECOVER",
12532 "PHYS_PHY_TEST",
12533 "reserved8",
12534 "PHYS_OFFLINE",
12535 "PHYS_GANGED",
12536 "PHYS_TEST",
12537 };
12538 if (pstate < ARRAY_SIZE(port_physical_names))
12539 return port_physical_names[pstate];
12540 return "unknown";
12541}
12542
12543/*
12544 * Read the hardware link state and set the driver's cached value of it.
12545 * Return the (new) current value.
12546 */
12547u32 get_logical_state(struct hfi1_pportdata *ppd)
12548{
12549 u32 new_state;
12550
12551 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12552 if (new_state != ppd->lstate) {
12553 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012554 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012555 ppd->lstate = new_state;
12556 }
12557 /*
12558 * Set port status flags in the page mapped into userspace
12559 * memory. Do it here to ensure a reliable state - this is
12560 * the only function called by all state handling code.
12561 * Always set the flags due to the fact that the cache value
12562 * might have been changed explicitly outside of this
12563 * function.
12564 */
12565 if (ppd->statusp) {
12566 switch (ppd->lstate) {
12567 case IB_PORT_DOWN:
12568 case IB_PORT_INIT:
12569 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12570 HFI1_STATUS_IB_READY);
12571 break;
12572 case IB_PORT_ARMED:
12573 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12574 break;
12575 case IB_PORT_ACTIVE:
12576 *ppd->statusp |= HFI1_STATUS_IB_READY;
12577 break;
12578 }
12579 }
12580 return ppd->lstate;
12581}
12582
12583/**
12584 * wait_logical_linkstate - wait for an IB link state change to occur
12585 * @ppd: port device
12586 * @state: the state to wait for
12587 * @msecs: the number of milliseconds to wait
12588 *
12589 * Wait up to msecs milliseconds for IB link state change to occur.
12590 * For now, take the easy polling route.
12591 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12592 */
12593static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12594 int msecs)
12595{
12596 unsigned long timeout;
12597
12598 timeout = jiffies + msecs_to_jiffies(msecs);
12599 while (1) {
12600 if (get_logical_state(ppd) == state)
12601 return 0;
12602 if (time_after(jiffies, timeout))
12603 break;
12604 msleep(20);
12605 }
12606 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12607
12608 return -ETIMEDOUT;
12609}
12610
12611u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12612{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012613 u32 pstate;
12614 u32 ib_pstate;
12615
12616 pstate = read_physical_state(ppd->dd);
12617 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012618 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012619 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012620 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12621 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12622 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012623 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012624 }
12625 return ib_pstate;
12626}
12627
Mike Marciniszyn77241052015-07-30 15:17:43 -040012628#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12629(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12630
12631#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12632(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12633
12634int hfi1_init_ctxt(struct send_context *sc)
12635{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012636 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012637 struct hfi1_devdata *dd = sc->dd;
12638 u64 reg;
12639 u8 set = (sc->type == SC_USER ?
12640 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12641 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12642 reg = read_kctxt_csr(dd, sc->hw_context,
12643 SEND_CTXT_CHECK_ENABLE);
12644 if (set)
12645 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12646 else
12647 SET_STATIC_RATE_CONTROL_SMASK(reg);
12648 write_kctxt_csr(dd, sc->hw_context,
12649 SEND_CTXT_CHECK_ENABLE, reg);
12650 }
12651 return 0;
12652}
12653
12654int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12655{
12656 int ret = 0;
12657 u64 reg;
12658
12659 if (dd->icode != ICODE_RTL_SILICON) {
12660 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12661 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12662 __func__);
12663 return -EINVAL;
12664 }
12665 reg = read_csr(dd, ASIC_STS_THERM);
12666 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12667 ASIC_STS_THERM_CURR_TEMP_MASK);
12668 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12669 ASIC_STS_THERM_LO_TEMP_MASK);
12670 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12671 ASIC_STS_THERM_HI_TEMP_MASK);
12672 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12673 ASIC_STS_THERM_CRIT_TEMP_MASK);
12674 /* triggers is a 3-bit value - 1 bit per trigger. */
12675 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12676
12677 return ret;
12678}
12679
12680/* ========================================================================= */
12681
12682/*
12683 * Enable/disable chip from delivering interrupts.
12684 */
12685void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12686{
12687 int i;
12688
12689 /*
12690 * In HFI, the mask needs to be 1 to allow interrupts.
12691 */
12692 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012693 /* enable all interrupts */
12694 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012695 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012696
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012697 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012698 } else {
12699 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012700 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012701 }
12702}
12703
12704/*
12705 * Clear all interrupt sources on the chip.
12706 */
12707static void clear_all_interrupts(struct hfi1_devdata *dd)
12708{
12709 int i;
12710
12711 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012712 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012713
12714 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12715 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12716 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12717 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12718 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12719 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12720 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12721 for (i = 0; i < dd->chip_send_contexts; i++)
12722 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12723 for (i = 0; i < dd->chip_sdma_engines; i++)
12724 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12725
12726 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12727 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12728 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12729}
12730
12731/* Move to pcie.c? */
12732static void disable_intx(struct pci_dev *pdev)
12733{
12734 pci_intx(pdev, 0);
12735}
12736
12737static void clean_up_interrupts(struct hfi1_devdata *dd)
12738{
12739 int i;
12740
12741 /* remove irqs - must happen before disabling/turning off */
12742 if (dd->num_msix_entries) {
12743 /* MSI-X */
12744 struct hfi1_msix_entry *me = dd->msix_entries;
12745
12746 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012747 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012748 continue;
12749 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012750 free_irq(me->msix.vector, me->arg);
12751 }
12752 } else {
12753 /* INTx */
12754 if (dd->requested_intx_irq) {
12755 free_irq(dd->pcidev->irq, dd);
12756 dd->requested_intx_irq = 0;
12757 }
12758 }
12759
12760 /* turn off interrupts */
12761 if (dd->num_msix_entries) {
12762 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012763 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012764 } else {
12765 /* INTx */
12766 disable_intx(dd->pcidev);
12767 }
12768
12769 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012770 kfree(dd->msix_entries);
12771 dd->msix_entries = NULL;
12772 dd->num_msix_entries = 0;
12773}
12774
12775/*
12776 * Remap the interrupt source from the general handler to the given MSI-X
12777 * interrupt.
12778 */
12779static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12780{
12781 u64 reg;
12782 int m, n;
12783
12784 /* clear from the handled mask of the general interrupt */
12785 m = isrc / 64;
12786 n = isrc % 64;
12787 dd->gi_mask[m] &= ~((u64)1 << n);
12788
12789 /* direct the chip source to the given MSI-X interrupt */
12790 m = isrc / 8;
12791 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012792 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12793 reg &= ~((u64)0xff << (8 * n));
12794 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12795 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012796}
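
/*
 * Worked example of the mapping arithmetic above (illustrative only).
 * Each CCE_INT_MAP CSR is 64 bits wide and holds eight 8-bit MSI-X
 * vector numbers, so chip interrupt source isrc lives in CSR isrc/8 at
 * byte lane isrc%8.  Routing source 19 to MSI-X vector 3, for example,
 * touches CSR 2 (offset CCE_INT_MAP + 16) and rewrites bits 31..24:
 *
 *	reg = read_csr(dd, CCE_INT_MAP + 8 * (19 / 8));
 *	reg &= ~((u64)0xff << (8 * (19 % 8)));
 *	reg |= ((u64)3 & 0xff) << (8 * (19 % 8));
 *	write_csr(dd, CCE_INT_MAP + 8 * (19 / 8), reg);
 *
 * The matching bit in gi_mask is cleared so the general handler no
 * longer claims that source.
 */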
12797
12798static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12799 int engine, int msix_intr)
12800{
12801 /*
12802	 * SDMA engine interrupt sources are grouped by type, rather than by
12803 * engine. Per-engine interrupts are as follows:
12804 * SDMA
12805 * SDMAProgress
12806 * SDMAIdle
12807 */
Jubin John8638b772016-02-14 20:19:24 -080012808 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012809 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012810 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012811 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012812 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012813 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012814}
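
/*
 * The SDMA interrupt source layout assumed above, written out as a
 * small sketch:
 *
 *	source(SDMA,         engine) = IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine
 *	source(SDMAProgress, engine) = IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine
 *	source(SDMAIdle,     engine) = IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine
 *
 * i.e. all SDMA sources come first, then all SDMAProgress sources, then
 * all SDMAIdle sources - "grouped by type, rather than by engine".
 */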
12815
Mike Marciniszyn77241052015-07-30 15:17:43 -040012816static int request_intx_irq(struct hfi1_devdata *dd)
12817{
12818 int ret;
12819
Jubin John98050712015-11-16 21:59:27 -050012820 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12821 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012822 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012823 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012824 if (ret)
12825 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012826 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012827 else
12828 dd->requested_intx_irq = 1;
12829 return ret;
12830}
12831
12832static int request_msix_irqs(struct hfi1_devdata *dd)
12833{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012834 int first_general, last_general;
12835 int first_sdma, last_sdma;
12836 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012837 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012838
12839 /* calculate the ranges we are going to use */
12840 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012841 last_general = first_general + 1;
12842 first_sdma = last_general;
12843 last_sdma = first_sdma + dd->num_sdma;
12844 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012845 last_rx = first_rx + dd->n_krcv_queues;
12846
12847 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012848 * Sanity check - the code expects all SDMA chip source
12849 * interrupts to be in the same CSR, starting at bit 0. Verify
12850 * that this is true by checking the bit location of the start.
12851 */
12852 BUILD_BUG_ON(IS_SDMA_START % 64);
12853
12854 for (i = 0; i < dd->num_msix_entries; i++) {
12855 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12856 const char *err_info;
12857 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012858 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012859 void *arg;
12860 int idx;
12861 struct hfi1_ctxtdata *rcd = NULL;
12862 struct sdma_engine *sde = NULL;
12863
12864 /* obtain the arguments to request_irq */
12865 if (first_general <= i && i < last_general) {
12866 idx = i - first_general;
12867 handler = general_interrupt;
12868 arg = dd;
12869 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012870 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012871 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012872 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012873 } else if (first_sdma <= i && i < last_sdma) {
12874 idx = i - first_sdma;
12875 sde = &dd->per_sdma[idx];
12876 handler = sdma_interrupt;
12877 arg = sde;
12878 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012879 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012880 err_info = "sdma";
12881 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012882 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012883 } else if (first_rx <= i && i < last_rx) {
12884 idx = i - first_rx;
12885 rcd = dd->rcd[idx];
12886 /* no interrupt if no rcd */
12887 if (!rcd)
12888 continue;
12889 /*
12890 * Set the interrupt register and mask for this
12891 * context's interrupt.
12892 */
Jubin John8638b772016-02-14 20:19:24 -080012893 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012894 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012895 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012896 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012897 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012898 arg = rcd;
12899 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012900 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012901 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012902 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012903 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012904 } else {
12905 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012906 * ignore it
12907 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012908 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012909 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012910 continue;
12911 }
12912 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012913 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012914 continue;
12915 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012916 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012917
Dean Luickf4f30031c2015-10-26 10:28:44 -040012918 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012919 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012920 if (ret) {
12921 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012922 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12923 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012924 return ret;
12925 }
12926 /*
12927 * assign arg after request_irq call, so it will be
12928 * cleaned up
12929 */
12930 me->arg = arg;
12931
Mitko Haralanov957558c2016-02-03 14:33:40 -080012932 ret = hfi1_get_irq_affinity(dd, me);
12933 if (ret)
12934 dd_dev_err(dd,
12935 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012936 }
12937
Mike Marciniszyn77241052015-07-30 15:17:43 -040012938 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012939}
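
/*
 * Sketch of the MSI-X vector layout computed at the top of
 * request_msix_irqs(); the counts are examples only.  With 16 SDMA
 * engines in use and 9 kernel receive queues the table would be:
 *
 *	vector 0        - general "slow path" interrupt
 *	vectors 1..16   - one per SDMA engine
 *	vectors 17..25  - one per kernel receive context
 *
 * matching total = 1 + dd->num_sdma + dd->n_krcv_queues used when the
 * vectors were allocated in set_up_interrupts() below.
 */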
12940
12941/*
12942 * Set the general handler to accept all interrupts, remap all
12943 * chip interrupts back to MSI-X 0.
12944 */
12945static void reset_interrupts(struct hfi1_devdata *dd)
12946{
12947 int i;
12948
12949 /* all interrupts handled by the general handler */
12950 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12951 dd->gi_mask[i] = ~(u64)0;
12952
12953 /* all chip interrupts map to MSI-X 0 */
12954 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012955 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012956}
12957
12958static int set_up_interrupts(struct hfi1_devdata *dd)
12959{
12960 struct hfi1_msix_entry *entries;
12961 u32 total, request;
12962 int i, ret;
12963 int single_interrupt = 0; /* we expect to have all the interrupts */
12964
12965 /*
12966 * Interrupt count:
12967 * 1 general, "slow path" interrupt (includes the SDMA engines
12968 * slow source, SDMACleanupDone)
12969 * N interrupts - one per used SDMA engine
12970	 * M interrupts - one per kernel receive context
12971 */
12972 total = 1 + dd->num_sdma + dd->n_krcv_queues;
12973
12974 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12975 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012976 ret = -ENOMEM;
12977 goto fail;
12978 }
12979 /* 1-1 MSI-X entry assignment */
12980 for (i = 0; i < total; i++)
12981 entries[i].msix.entry = i;
12982
12983 /* ask for MSI-X interrupts */
12984 request = total;
12985 request_msix(dd, &request, entries);
12986
12987 if (request == 0) {
12988 /* using INTx */
12989 /* dd->num_msix_entries already zero */
12990 kfree(entries);
12991 single_interrupt = 1;
12992 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12993 } else {
12994 /* using MSI-X */
12995 dd->num_msix_entries = request;
12996 dd->msix_entries = entries;
12997
12998 if (request != total) {
12999 /* using MSI-X, with reduced interrupts */
13000 dd_dev_err(
13001 dd,
13002 "cannot handle reduced interrupt case, want %u, got %u\n",
13003 total, request);
13004 ret = -EINVAL;
13005 goto fail;
13006 }
13007 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13008 }
13009
13010 /* mask all interrupts */
13011 set_intr_state(dd, 0);
13012 /* clear all pending interrupts */
13013 clear_all_interrupts(dd);
13014
13015 /* reset general handler mask, chip MSI-X mappings */
13016 reset_interrupts(dd);
13017
13018 if (single_interrupt)
13019 ret = request_intx_irq(dd);
13020 else
13021 ret = request_msix_irqs(dd);
13022 if (ret)
13023 goto fail;
13024
13025 return 0;
13026
13027fail:
13028 clean_up_interrupts(dd);
13029 return ret;
13030}
13031
13032/*
13033 * Set up context values in dd. Sets:
13034 *
13035 * num_rcv_contexts - number of contexts being used
13036 * n_krcv_queues - number of kernel contexts
13037 * first_user_ctxt - first non-kernel context in array of contexts
13038 * freectxts - number of free user contexts
13039 * num_send_contexts - number of PIO send contexts being used
13040 */
13041static int set_up_context_variables(struct hfi1_devdata *dd)
13042{
Harish Chegondi429b6a72016-08-31 07:24:40 -070013043 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013044 int total_contexts;
13045 int ret;
13046 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070013047 int qos_rmt_count;
13048 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013049
13050 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013051 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013052 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070013053 * - Context 1 - first kernel context
13054 * - Context 2 - second kernel context
13055 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040013056 */
13057 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013058 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013059 * n_krcvqs is the sum of module parameter kernel receive
13060 * contexts, krcvqs[]. It does not include the control
13061 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013062 */
Dean Luick33a9eb52016-04-12 10:50:22 -070013063 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013064 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070013065 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013066 /*
13067 * Every kernel receive context needs an ACK send context.
13068	 * One send context is allocated for each VL{0-7} and VL15.
13069 */
13070 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13071 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070013072 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013073 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070013074 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013075 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13076 }
13077 /*
Jubin John0852d242016-04-12 11:30:08 -070013078 * User contexts:
13079 * - default to 1 user context per real (non-HT) CPU core if
13080 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040013081 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050013082 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070013083 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070013084 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013085
13086 total_contexts = num_kernel_contexts + num_user_contexts;
13087
13088 /*
13089 * Adjust the counts given a global max.
13090 */
13091 if (total_contexts > dd->chip_rcv_contexts) {
13092 dd_dev_err(dd,
13093 "Reducing # user receive contexts to: %d, from %d\n",
13094 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13095 (int)num_user_contexts);
13096 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13097 /* recalculate */
13098 total_contexts = num_kernel_contexts + num_user_contexts;
13099 }
13100
Dean Luick8f000f72016-04-12 11:32:06 -070013101 /* each user context requires an entry in the RMT */
13102 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13103 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13104 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13105 dd_dev_err(dd,
13106 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13107 (int)num_user_contexts,
13108 user_rmt_reduced);
13109 /* recalculate */
13110 num_user_contexts = user_rmt_reduced;
13111 total_contexts = num_kernel_contexts + num_user_contexts;
13112 }
13113
Mike Marciniszyn77241052015-07-30 15:17:43 -040013114 /* the first N are kernel contexts, the rest are user contexts */
13115 dd->num_rcv_contexts = total_contexts;
13116 dd->n_krcv_queues = num_kernel_contexts;
13117 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013118 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013119 dd->freectxts = num_user_contexts;
13120 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013121 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13122 (int)dd->chip_rcv_contexts,
13123 (int)dd->num_rcv_contexts,
13124 (int)dd->n_krcv_queues,
13125 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013126
13127 /*
13128 * Receive array allocation:
13129 * All RcvArray entries are divided into groups of 8. This
13130 * is required by the hardware and will speed up writes to
13131 * consecutive entries by using write-combining of the entire
13132 * cacheline.
13133 *
13134	 * The number of groups is evenly divided among all contexts;
13135	 * any leftover groups are given to the first N user contexts.
13136 * contexts.
13137 */
13138 dd->rcv_entries.group_size = RCV_INCREMENT;
13139 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13140 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13141 dd->rcv_entries.nctxt_extra = ngroups -
13142 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13143 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13144 dd->rcv_entries.ngroups,
13145 dd->rcv_entries.nctxt_extra);
13146 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13147 MAX_EAGER_ENTRIES * 2) {
13148 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13149 dd->rcv_entries.group_size;
13150 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013151 "RcvArray group count too high, change to %u\n",
13152 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013153 dd->rcv_entries.nctxt_extra = 0;
13154 }
13155 /*
13156 * PIO send contexts
13157 */
13158 ret = init_sc_pools_and_sizes(dd);
13159 if (ret >= 0) { /* success */
13160 dd->num_send_contexts = ret;
13161 dd_dev_info(
13162 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013163 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013164 dd->chip_send_contexts,
13165 dd->num_send_contexts,
13166 dd->sc_sizes[SC_KERNEL].count,
13167 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013168 dd->sc_sizes[SC_USER].count,
13169 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013170 ret = 0; /* success */
13171 }
13172
13173 return ret;
13174}
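
/*
 * Example of the RcvArray group arithmetic above, with made-up sizes:
 * a chip with 2048 RcvArray entries and a group size of 8 has 256
 * groups.  With 10 receive contexts each context gets 256 / 10 = 25
 * groups, and the remaining 256 - (10 * 25) = 6 groups become
 * nctxt_extra, handed out to the first user contexts when the receive
 * arrays are sized.
 */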
13175
13176/*
13177 * Set the device/port partition key table. The MAD code
13178 * will ensure that, at least, the partial management
13179 * partition key is present in the table.
13180 */
13181static void set_partition_keys(struct hfi1_pportdata *ppd)
13182{
13183 struct hfi1_devdata *dd = ppd->dd;
13184 u64 reg = 0;
13185 int i;
13186
13187 dd_dev_info(dd, "Setting partition keys\n");
13188 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13189 reg |= (ppd->pkeys[i] &
13190 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13191 ((i % 4) *
13192 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13193 /* Each register holds 4 PKey values. */
13194 if ((i % 4) == 3) {
13195 write_csr(dd, RCV_PARTITION_KEY +
13196 ((i - 3) * 2), reg);
13197 reg = 0;
13198 }
13199 }
13200
13201 /* Always enable HW pkeys check when pkeys table is set */
13202 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13203}
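
/*
 * Packing illustration for the loop above; the pkey values are examples
 * only.  Each 64-bit RCV_PARTITION_KEY register holds four 16-bit pkeys,
 * so pkeys 0-3 land in the first register, pkeys 4-7 in the next, and
 * so on.  With pkeys[0..3] = 0xffff, 0x8001, 0x0002, 0x0003 the first
 * register is written as:
 *
 *	reg = 0xffffull
 *	    | 0x8001ull << 16
 *	    | 0x0002ull << 32
 *	    | 0x0003ull << 48;
 *	write_csr(dd, RCV_PARTITION_KEY + 0, reg);
 */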
13204
13205/*
13206 * These CSRs and memories are uninitialized on reset and must be
13207 * written before reading to set the ECC/parity bits.
13208 *
13209 * NOTE: All user context CSRs that are not mmaped write-only
13210 * (e.g. the TID flows) must be initialized even if the driver never
13211 * reads them.
13212 */
13213static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13214{
13215 int i, j;
13216
13217 /* CceIntMap */
13218 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013219 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013220
13221 /* SendCtxtCreditReturnAddr */
13222 for (i = 0; i < dd->chip_send_contexts; i++)
13223 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13224
13225 /* PIO Send buffers */
13226 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013227 /*
13228 * These are not normally read, and (presently) have no method
13229 * to be read, so are not pre-initialized
13230 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013231
13232 /* RcvHdrAddr */
13233 /* RcvHdrTailAddr */
13234 /* RcvTidFlowTable */
13235 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13236 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13237 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13238 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013239 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013240 }
13241
13242 /* RcvArray */
13243 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080013244 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013245 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013246
13247 /* RcvQPMapTable */
13248 for (i = 0; i < 32; i++)
13249 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13250}
13251
13252/*
13253 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13254 */
13255static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13256 u64 ctrl_bits)
13257{
13258 unsigned long timeout;
13259 u64 reg;
13260
13261 /* is the condition present? */
13262 reg = read_csr(dd, CCE_STATUS);
13263 if ((reg & status_bits) == 0)
13264 return;
13265
13266 /* clear the condition */
13267 write_csr(dd, CCE_CTRL, ctrl_bits);
13268
13269 /* wait for the condition to clear */
13270 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13271 while (1) {
13272 reg = read_csr(dd, CCE_STATUS);
13273 if ((reg & status_bits) == 0)
13274 return;
13275 if (time_after(jiffies, timeout)) {
13276 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013277 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13278 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013279 return;
13280 }
13281 udelay(1);
13282 }
13283}
13284
13285/* set CCE CSRs to chip reset defaults */
13286static void reset_cce_csrs(struct hfi1_devdata *dd)
13287{
13288 int i;
13289
13290 /* CCE_REVISION read-only */
13291 /* CCE_REVISION2 read-only */
13292 /* CCE_CTRL - bits clear automatically */
13293 /* CCE_STATUS read-only, use CceCtrl to clear */
13294 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13295 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13296 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13297 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13298 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13299 /* CCE_ERR_STATUS read-only */
13300 write_csr(dd, CCE_ERR_MASK, 0);
13301 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13302 /* CCE_ERR_FORCE leave alone */
13303 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13304 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13305 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13306 /* CCE_PCIE_CTRL leave alone */
13307 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13308 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13309 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013310 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013311 }
13312 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13313 /* CCE_MSIX_PBA read-only */
13314 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13315 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13316 }
13317 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13318 write_csr(dd, CCE_INT_MAP, 0);
13319 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13320 /* CCE_INT_STATUS read-only */
13321 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13322 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13323 /* CCE_INT_FORCE leave alone */
13324 /* CCE_INT_BLOCKED read-only */
13325 }
13326 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13327 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13328}
13329
Mike Marciniszyn77241052015-07-30 15:17:43 -040013330/* set MISC CSRs to chip reset defaults */
13331static void reset_misc_csrs(struct hfi1_devdata *dd)
13332{
13333 int i;
13334
13335 for (i = 0; i < 32; i++) {
13336 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13337 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13338 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13339 }
Jubin John4d114fd2016-02-14 20:21:43 -080013340 /*
13341 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13342	 * only be written in 128-byte chunks
13343 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013344 /* init RSA engine to clear lingering errors */
13345 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13346 write_csr(dd, MISC_CFG_RSA_MU, 0);
13347 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13348 /* MISC_STS_8051_DIGEST read-only */
13349 /* MISC_STS_SBM_DIGEST read-only */
13350 /* MISC_STS_PCIE_DIGEST read-only */
13351 /* MISC_STS_FAB_DIGEST read-only */
13352 /* MISC_ERR_STATUS read-only */
13353 write_csr(dd, MISC_ERR_MASK, 0);
13354 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13355 /* MISC_ERR_FORCE leave alone */
13356}
13357
13358/* set TXE CSRs to chip reset defaults */
13359static void reset_txe_csrs(struct hfi1_devdata *dd)
13360{
13361 int i;
13362
13363 /*
13364 * TXE Kernel CSRs
13365 */
13366 write_csr(dd, SEND_CTRL, 0);
13367 __cm_reset(dd, 0); /* reset CM internal state */
13368 /* SEND_CONTEXTS read-only */
13369 /* SEND_DMA_ENGINES read-only */
13370 /* SEND_PIO_MEM_SIZE read-only */
13371 /* SEND_DMA_MEM_SIZE read-only */
13372 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13373 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13374 /* SEND_PIO_ERR_STATUS read-only */
13375 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13376 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13377 /* SEND_PIO_ERR_FORCE leave alone */
13378 /* SEND_DMA_ERR_STATUS read-only */
13379 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13380 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13381 /* SEND_DMA_ERR_FORCE leave alone */
13382 /* SEND_EGRESS_ERR_STATUS read-only */
13383 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13384 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13385 /* SEND_EGRESS_ERR_FORCE leave alone */
13386 write_csr(dd, SEND_BTH_QP, 0);
13387 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13388 write_csr(dd, SEND_SC2VLT0, 0);
13389 write_csr(dd, SEND_SC2VLT1, 0);
13390 write_csr(dd, SEND_SC2VLT2, 0);
13391 write_csr(dd, SEND_SC2VLT3, 0);
13392 write_csr(dd, SEND_LEN_CHECK0, 0);
13393 write_csr(dd, SEND_LEN_CHECK1, 0);
13394 /* SEND_ERR_STATUS read-only */
13395 write_csr(dd, SEND_ERR_MASK, 0);
13396 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13397 /* SEND_ERR_FORCE read-only */
13398 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013399 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013400 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013401 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13402 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13403 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013404 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013405 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013406 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013407 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013408 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013409 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013410 /* SEND_CM_CREDIT_USED_STATUS read-only */
13411 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13412 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13413 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13414 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13415 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13416 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013417 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013418 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13419 /* SEND_CM_CREDIT_USED_VL read-only */
13420 /* SEND_CM_CREDIT_USED_VL15 read-only */
13421 /* SEND_EGRESS_CTXT_STATUS read-only */
13422 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13423 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13424 /* SEND_EGRESS_ERR_INFO read-only */
13425 /* SEND_EGRESS_ERR_SOURCE read-only */
13426
13427 /*
13428 * TXE Per-Context CSRs
13429 */
13430 for (i = 0; i < dd->chip_send_contexts; i++) {
13431 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13432 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13433 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13434 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13435 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13436 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13437 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13438 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13439 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13440 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13441 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13442 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13443 }
13444
13445 /*
13446 * TXE Per-SDMA CSRs
13447 */
13448 for (i = 0; i < dd->chip_sdma_engines; i++) {
13449 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13450 /* SEND_DMA_STATUS read-only */
13451 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13452 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13453 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13454 /* SEND_DMA_HEAD read-only */
13455 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13456 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13457 /* SEND_DMA_IDLE_CNT read-only */
13458 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13459 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13460 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13461 /* SEND_DMA_ENG_ERR_STATUS read-only */
13462 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13463 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13464 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13465 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13466 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13467 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13468 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13469 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13470 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13471 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13472 }
13473}
13474
13475/*
13476 * Expect on entry:
13477 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13478 */
13479static void init_rbufs(struct hfi1_devdata *dd)
13480{
13481 u64 reg;
13482 int count;
13483
13484 /*
13485 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13486 * clear.
13487 */
13488 count = 0;
13489 while (1) {
13490 reg = read_csr(dd, RCV_STATUS);
13491 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13492 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13493 break;
13494 /*
13495 * Give up after 1ms - maximum wait time.
13496 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013497 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013498 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013499		 * 136 KiB / (66% * 250MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013500 */
13501 if (count++ > 500) {
13502 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013503 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13504 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013505 break;
13506 }
13507 udelay(2); /* do not busy-wait the CSR */
13508 }
13509
13510 /* start the init - expect RcvCtrl to be 0 */
13511 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13512
13513 /*
13514	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13515 * period after the write before RcvStatus.RxRbufInitDone is valid.
13516 * The delay in the first run through the loop below is sufficient and
13517	 * required before the first read of RcvStatus.RxRbufInitDone.
13518 */
13519 read_csr(dd, RCV_CTRL);
13520
13521 /* wait for the init to finish */
13522 count = 0;
13523 while (1) {
13524 /* delay is required first time through - see above */
13525 udelay(2); /* do not busy-wait the CSR */
13526 reg = read_csr(dd, RCV_STATUS);
13527 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13528 break;
13529
13530 /* give up after 100us - slowest possible at 33MHz is 73us */
13531 if (count++ > 50) {
13532 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013533 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13534 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013535 break;
13536 }
13537 }
13538}
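
/*
 * A back-of-the-envelope check of the 1 ms budget used above, not
 * driver code: 136 KiB is 139264 bytes, and draining it at 66% of
 * 250 MB/s takes
 *
 *	139264 / (0.66 * 250e6) ~= 844 us
 *
 * so 500 iterations of udelay(2) (roughly 1 ms) comfortably covers the
 * worst case before the loop gives up.
 */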
13539
13540/* set RXE CSRs to chip reset defaults */
13541static void reset_rxe_csrs(struct hfi1_devdata *dd)
13542{
13543 int i, j;
13544
13545 /*
13546 * RXE Kernel CSRs
13547 */
13548 write_csr(dd, RCV_CTRL, 0);
13549 init_rbufs(dd);
13550 /* RCV_STATUS read-only */
13551 /* RCV_CONTEXTS read-only */
13552 /* RCV_ARRAY_CNT read-only */
13553 /* RCV_BUF_SIZE read-only */
13554 write_csr(dd, RCV_BTH_QP, 0);
13555 write_csr(dd, RCV_MULTICAST, 0);
13556 write_csr(dd, RCV_BYPASS, 0);
13557 write_csr(dd, RCV_VL15, 0);
13558 /* this is a clear-down */
13559 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013560 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013561 /* RCV_ERR_STATUS read-only */
13562 write_csr(dd, RCV_ERR_MASK, 0);
13563 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13564 /* RCV_ERR_FORCE leave alone */
13565 for (i = 0; i < 32; i++)
13566 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13567 for (i = 0; i < 4; i++)
13568 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13569 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13570 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13571 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13572 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13573 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13574 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13575 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13576 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13577 }
13578 for (i = 0; i < 32; i++)
13579 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13580
13581 /*
13582 * RXE Kernel and User Per-Context CSRs
13583 */
13584 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13585 /* kernel */
13586 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13587 /* RCV_CTXT_STATUS read-only */
13588 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13589 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13590 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13591 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13592 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13593 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13594 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13595 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13596 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13597 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13598
13599 /* user */
13600 /* RCV_HDR_TAIL read-only */
13601 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13602 /* RCV_EGR_INDEX_TAIL read-only */
13603 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13604 /* RCV_EGR_OFFSET_TAIL read-only */
13605 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013606 write_uctxt_csr(dd, i,
13607 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013608 }
13609 }
13610}
13611
13612/*
13613 * Set sc2vl tables.
13614 *
13615 * They power on to zeros, so to avoid send context errors
13616 * they need to be set:
13617 *
13618 * SC 0-7 -> VL 0-7 (respectively)
13619 * SC 15 -> VL 15
13620 * otherwise
13621 * -> VL 0
13622 */
13623static void init_sc2vl_tables(struct hfi1_devdata *dd)
13624{
13625 int i;
13626 /* init per architecture spec, constrained by hardware capability */
13627
13628 /* HFI maps sent packets */
13629 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13630 0,
13631 0, 0, 1, 1,
13632 2, 2, 3, 3,
13633 4, 4, 5, 5,
13634 6, 6, 7, 7));
13635 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13636 1,
13637 8, 0, 9, 0,
13638 10, 0, 11, 0,
13639 12, 0, 13, 0,
13640 14, 0, 15, 15));
13641 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13642 2,
13643 16, 0, 17, 0,
13644 18, 0, 19, 0,
13645 20, 0, 21, 0,
13646 22, 0, 23, 0));
13647 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13648 3,
13649 24, 0, 25, 0,
13650 26, 0, 27, 0,
13651 28, 0, 29, 0,
13652 30, 0, 31, 0));
13653
13654 /* DC maps received packets */
13655 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13656 15_0,
13657 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13658 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13659 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13660 31_16,
13661 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13662 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13663
13664 /* initialize the cached sc2vl values consistently with h/w */
13665 for (i = 0; i < 32; i++) {
13666 if (i < 8 || i == 15)
13667 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13668 else
13669 *((u8 *)(dd->sc2vl) + i) = 0;
13670 }
13671}
13672
13673 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13674 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13675 * depend on the chip going through a power-on reset - a driver may be loaded
13676 * and unloaded many times.
13677 *
13678 * Do not write any CSR values to the chip in this routine - there may be
13679 * a reset following the (possible) FLR in this routine.
13680 *
13681 */
13682static void init_chip(struct hfi1_devdata *dd)
13683{
13684 int i;
13685
13686 /*
13687 * Put the HFI CSRs in a known state.
13688 * Combine this with a DC reset.
13689 *
13690 * Stop the device from doing anything while we do a
13691 * reset. We know there are no other active users of
13692 * the device since we are now in charge. Turn off
13693 * all outbound and inbound traffic and make sure
13694 * the device does not generate any interrupts.
13695 */
13696
13697 /* disable send contexts and SDMA engines */
13698 write_csr(dd, SEND_CTRL, 0);
13699 for (i = 0; i < dd->chip_send_contexts; i++)
13700 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13701 for (i = 0; i < dd->chip_sdma_engines; i++)
13702 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13703 /* disable port (turn off RXE inbound traffic) and contexts */
13704 write_csr(dd, RCV_CTRL, 0);
13705 for (i = 0; i < dd->chip_rcv_contexts; i++)
13706 write_csr(dd, RCV_CTXT_CTRL, 0);
13707 /* mask all interrupt sources */
13708 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013709 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013710
13711 /*
13712 * DC Reset: do a full DC reset before the register clear.
13713 * A recommended length of time to hold is one CSR read,
13714 * so reread the CceDcCtrl. Then, hold the DC in reset
13715 * across the clear.
13716 */
13717 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013718 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013719
13720 if (use_flr) {
13721 /*
13722 * A FLR will reset the SPC core and part of the PCIe.
13723 * The parts that need to be restored have already been
13724 * saved.
13725 */
13726 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13727
13728 /* do the FLR, the DC reset will remain */
13729 hfi1_pcie_flr(dd);
13730
13731 /* restore command and BARs */
13732 restore_pci_variables(dd);
13733
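		/*
		 * A0 hardware goes through the FLR and PCI variable restore
		 * sequence a second time.
		 */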
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013734 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013735 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13736 hfi1_pcie_flr(dd);
13737 restore_pci_variables(dd);
13738 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013739 } else {
13740 dd_dev_info(dd, "Resetting CSRs with writes\n");
13741 reset_cce_csrs(dd);
13742 reset_txe_csrs(dd);
13743 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013744 reset_misc_csrs(dd);
13745 }
13746 /* clear the DC reset */
13747 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013748
Mike Marciniszyn77241052015-07-30 15:17:43 -040013749 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013750 setextled(dd, 0);
13751
Mike Marciniszyn77241052015-07-30 15:17:43 -040013752 /*
13753 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013754 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013755 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013756 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013757 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013758 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013759 * I2CCLK and I2CDAT will change per direction, and INT_N and
13760 * MODPRS_N are input only and their value is ignored.
13761 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013762 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13763 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013764 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013765}
13766
13767static void init_early_variables(struct hfi1_devdata *dd)
13768{
13769 int i;
13770
13771 /* assign link credit variables */
13772 dd->vau = CM_VAU;
13773 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013774 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013775 dd->link_credits--;
13776 dd->vcu = cu_to_vcu(hfi1_cu);
13777 /* enough room for 8 MAD packets plus header - 17K */
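	/*
	 * 2048 bytes of MAD payload plus a 128 byte header per packet;
	 * vau_to_au() gives the AU size in bytes, so the division converts
	 * the byte total into allocation units.
	 */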
13778 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13779 if (dd->vl15_init > dd->link_credits)
13780 dd->vl15_init = dd->link_credits;
13781
13782 write_uninitialized_csrs_and_memories(dd);
13783
13784 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13785 for (i = 0; i < dd->num_pports; i++) {
13786 struct hfi1_pportdata *ppd = &dd->pport[i];
13787
13788 set_partition_keys(ppd);
13789 }
13790 init_sc2vl_tables(dd);
13791}
13792
13793static void init_kdeth_qp(struct hfi1_devdata *dd)
13794{
13795 /* user changed the KDETH_QP */
13796 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13797 /* out of range or illegal value */
13798 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13799 kdeth_qp = 0;
13800 }
13801 if (kdeth_qp == 0) /* not set, or failed range check */
13802 kdeth_qp = DEFAULT_KDETH_QP;
13803
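	/* program the same prefix into both the send and receive sides */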
13804 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013805 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13806 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013807
13808 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013809 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13810 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013811}
13812
13813/**
13814 * init_qpmap_table
13815 * @dd - device data
13816 * @first_ctxt - first context
13817 * @last_ctxt - last context
13818 *
13819 * This routine sets the qpn mapping table that
13820 * is indexed by qpn[8:1].
13821 *
13822 * The routine will round robin the 256 settings
13823 * from first_ctxt to last_ctxt.
13824 *
13825 * The first/last looks ahead to having specialized
13826 * receive contexts for mgmt and bypass. Normal
13827 * verbs traffic is assumed to be on a range
13828 * of receive contexts.
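 *
 * Illustrative example: with first_ctxt = 2 and last_ctxt = 4, the 256
 * entries are written as 2, 3, 4, 2, 3, 4, ..., packed eight per 64-bit
 * RCV_QP_MAP_TABLE CSR.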
13829 */
13830static void init_qpmap_table(struct hfi1_devdata *dd,
13831 u32 first_ctxt,
13832 u32 last_ctxt)
13833{
13834 u64 reg = 0;
13835 u64 regno = RCV_QP_MAP_TABLE;
13836 int i;
13837 u64 ctxt = first_ctxt;
13838
Dean Luick60d585ad2016-04-12 10:50:35 -070013839 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013840 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013841 ctxt++;
13842 if (ctxt > last_ctxt)
13843 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013844 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013845 write_csr(dd, regno, reg);
13846 reg = 0;
13847 regno += 8;
13848 }
13849 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013850
13851 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13852 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13853}
13854
Dean Luick372cc85a2016-04-12 11:30:51 -070013855struct rsm_map_table {
13856 u64 map[NUM_MAP_REGS];
13857 unsigned int used;
13858};
13859
Dean Luickb12349a2016-04-12 11:31:33 -070013860struct rsm_rule_data {
13861 u8 offset;
13862 u8 pkt_type;
13863 u32 field1_off;
13864 u32 field2_off;
13865 u32 index1_off;
13866 u32 index1_width;
13867 u32 index2_off;
13868 u32 index2_width;
13869 u32 mask1;
13870 u32 value1;
13871 u32 mask2;
13872 u32 value2;
13873};
13874
Dean Luick372cc85a2016-04-12 11:30:51 -070013875/*
13876 * Return an initialized RMT map table for users to fill in. OK if it
13877 * returns NULL, indicating no table.
13878 */
13879static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13880{
13881 struct rsm_map_table *rmt;
13882 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13883
13884 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13885 if (rmt) {
13886 memset(rmt->map, rxcontext, sizeof(rmt->map));
13887 rmt->used = 0;
13888 }
13889
13890 return rmt;
13891}
13892
13893/*
13894 * Write the final RMT map table to the chip; the caller frees the table.
13895 * OK if the table is NULL.
13896 */
13897static void complete_rsm_map_table(struct hfi1_devdata *dd,
13898 struct rsm_map_table *rmt)
13899{
13900 int i;
13901
13902 if (rmt) {
13903 /* write table to chip */
13904 for (i = 0; i < NUM_MAP_REGS; i++)
13905 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13906
13907 /* enable RSM */
13908 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13909 }
13910}
13911
Dean Luickb12349a2016-04-12 11:31:33 -070013912/*
13913 * Add a receive side mapping rule.
13914 */
13915static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13916 struct rsm_rule_data *rrd)
13917{
13918 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13919 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13920 1ull << rule_index | /* enable bit */
13921 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13922 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13923 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13924 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13925 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13926 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13927 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13928 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13929 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13930 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13931 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13932 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13933 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13934}
13935
Dean Luick4a818be2016-04-12 11:31:11 -070013936/* return the number of RSM map table entries that will be used for QOS */
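/*
 * Illustrative sizing: with num_vls = 8 and at most 4 kernel receive queues
 * on any VL, m = 2 and n = 3, so 1 << (m + n) = 32 map table entries are
 * needed.
 */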
13937static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13938 unsigned int *np)
13939{
13940 int i;
13941 unsigned int m, n;
13942 u8 max_by_vl = 0;
13943
13944 /* is QOS active at all? */
13945 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13946 num_vls == 1 ||
13947 krcvqsset <= 1)
13948 goto no_qos;
13949
13950 /* determine bits for qpn */
13951 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13952 if (krcvqs[i] > max_by_vl)
13953 max_by_vl = krcvqs[i];
13954 if (max_by_vl > 32)
13955 goto no_qos;
13956 m = ilog2(__roundup_pow_of_two(max_by_vl));
13957
13958 /* determine bits for vl */
13959 n = ilog2(__roundup_pow_of_two(num_vls));
13960
13961 /* reject if too much is used */
13962 if ((m + n) > 7)
13963 goto no_qos;
13964
13965 if (mp)
13966 *mp = m;
13967 if (np)
13968 *np = n;
13969
13970 return 1 << (m + n);
13971
13972no_qos:
13973 if (mp)
13974 *mp = 0;
13975 if (np)
13976 *np = 0;
13977 return 0;
13978}
13979
Mike Marciniszyn77241052015-07-30 15:17:43 -040013980/**
13981 * init_qos - init RX qos
13982 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013983 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013984 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013985 * This routine initializes Rule 0 and the RSM map table to implement
13986 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013987 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013988 * If all of the limit tests succeed, qos is applied based on the array
13989 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013990 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013991 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13992 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013993 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013994static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013995{
Dean Luickb12349a2016-04-12 11:31:33 -070013996 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013997 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013998 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013999 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014000
Dean Luick4a818be2016-04-12 11:31:11 -070014001 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014002 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014003 rmt_entries = qos_rmt_entries(dd, &m, &n);
14004 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014005 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014006 qpns_per_vl = 1 << m;
14007
Dean Luick372cc85a2016-04-12 11:30:51 -070014008 /* enough room in the map table? */
14009 rmt_entries = 1 << (m + n);
14010 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050014011 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014012
Dean Luick372cc85a2016-04-12 11:30:51 -070014013 /* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070014014 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014015 unsigned tctxt;
14016
14017 for (qpn = 0, tctxt = ctxt;
14018 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14019 unsigned idx, regoff, regidx;
14020
Dean Luick372cc85a2016-04-12 11:30:51 -070014021 /* generate the index the hardware will produce */
14022 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014023 regoff = (idx % 8) * 8;
14024 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070014025 /* replace default with context number */
14026 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040014027 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14028 << regoff);
14029 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070014030 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014031 if (tctxt == ctxt + krcvqs[i])
14032 tctxt = ctxt;
14033 }
14034 ctxt += krcvqs[i];
14035 }
Dean Luickb12349a2016-04-12 11:31:33 -070014036
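	/*
	 * Rule 0 indexes the map table with the SC (index1, n bits) and the
	 * low QPN bits (index2, m + n bits), offset by the block of entries
	 * filled in above.
	 */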
14037 rrd.offset = rmt->used;
14038 rrd.pkt_type = 2;
14039 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14040 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14041 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14042 rrd.index1_width = n;
14043 rrd.index2_off = QPN_SELECT_OFFSET;
14044 rrd.index2_width = m + n;
14045 rrd.mask1 = LRH_BTH_MASK;
14046 rrd.value1 = LRH_BTH_VALUE;
14047 rrd.mask2 = LRH_SC_MASK;
14048 rrd.value2 = LRH_SC_VALUE;
14049
14050 /* add rule 0 */
14051 add_rsm_rule(dd, 0, &rrd);
14052
Dean Luick372cc85a2016-04-12 11:30:51 -070014053 /* mark RSM map entries as used */
14054 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070014055 /* map everything else to the mcast/err/vl15 context */
14056 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014057 dd->qos_shift = n + 1;
14058 return;
14059bail:
14060 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050014061 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014062}
14063
Dean Luick8f000f72016-04-12 11:32:06 -070014064static void init_user_fecn_handling(struct hfi1_devdata *dd,
14065 struct rsm_map_table *rmt)
14066{
14067 struct rsm_rule_data rrd;
14068 u64 reg;
14069 int i, idx, regoff, regidx;
14070 u8 offset;
14071
14072 /* there needs to be enough room in the map table */
14073 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14074 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14075 return;
14076 }
14077
14078 /*
14079 * RSM will extract the destination context as an index into the
14080 * map table. The destination contexts are a sequential block
14081 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
14082 * Map entries are accessed as offset + extracted value. Adjust
14083 * the added offset so this sequence can be placed anywhere in
14084 * the table - as long as the entries themselves do not wrap.
14085 * There are only enough bits in offset for the table size, so
14086 * start with that to allow for a "negative" offset.
14087 */
14088 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14089 (int)dd->first_user_ctxt);
14090
14091 for (i = dd->first_user_ctxt, idx = rmt->used;
14092 i < dd->num_rcv_contexts; i++, idx++) {
14093 /* replace with identity mapping */
14094 regoff = (idx % 8) * 8;
14095 regidx = idx / 8;
14096 reg = rmt->map[regidx];
14097 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14098 reg |= (u64)i << regoff;
14099 rmt->map[regidx] = reg;
14100 }
14101
14102 /*
14103 * For RSM intercept of Expected FECN packets:
14104 * o packet type 0 - expected
14105 * o match on F (bit 95), using select/match 1, and
14106 * o match on SH (bit 133), using select/match 2.
14107 *
14108 * Use index 1 to extract the 8-bit receive context from DestQP
14109 * (start at bit 64). Use that as the RSM map table index.
14110 */
14111 rrd.offset = offset;
14112 rrd.pkt_type = 0;
14113 rrd.field1_off = 95;
14114 rrd.field2_off = 133;
14115 rrd.index1_off = 64;
14116 rrd.index1_width = 8;
14117 rrd.index2_off = 0;
14118 rrd.index2_width = 0;
14119 rrd.mask1 = 1;
14120 rrd.value1 = 1;
14121 rrd.mask2 = 1;
14122 rrd.value2 = 1;
14123
14124 /* add rule 1 */
14125 add_rsm_rule(dd, 1, &rrd);
14126
14127 rmt->used += dd->num_user_contexts;
14128}
14129
Mike Marciniszyn77241052015-07-30 15:17:43 -040014130static void init_rxe(struct hfi1_devdata *dd)
14131{
Dean Luick372cc85a2016-04-12 11:30:51 -070014132 struct rsm_map_table *rmt;
14133
Mike Marciniszyn77241052015-07-30 15:17:43 -040014134 /* enable all receive errors */
14135 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014136
14137 rmt = alloc_rsm_map_table(dd);
14138 /* set up QOS, including the QPN map table */
14139 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014140 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014141 complete_rsm_map_table(dd, rmt);
14142 kfree(rmt);
14143
Mike Marciniszyn77241052015-07-30 15:17:43 -040014144 /*
14145 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14146 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14147 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14148 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14149 * Max_PayLoad_Size set to its minimum of 128.
14150 *
14151 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14152 * (64 bytes). Max_Payload_Size is possibly modified upward in
14153 * tune_pcie_caps() which is called after this routine.
14154 */
14155}
14156
14157static void init_other(struct hfi1_devdata *dd)
14158{
14159 /* enable all CCE errors */
14160 write_csr(dd, CCE_ERR_MASK, ~0ull);
14161 /* enable *some* Misc errors */
14162 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14163 /* enable all DC errors, except LCB */
14164 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14165 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14166}
14167
14168/*
14169 * Fill out the given AU table using the given CU. A CU is defined in terms
14170 * of AUs. The table is an encoding: given the index, how many AUs does that
14171 * represent?
14172 *
14173 * NOTE: Assumes that the register layout is the same for the
14174 * local and remote tables.
14175 */
14176static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14177 u32 csr0to3, u32 csr4to7)
14178{
14179 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014180 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14181 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14182 2ull * cu <<
14183 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14184 4ull * cu <<
14185 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014186 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014187 8ull * cu <<
14188 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14189 16ull * cu <<
14190 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14191 32ull * cu <<
14192 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14193 64ull * cu <<
14194 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014195}
14196
14197static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14198{
14199 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014200 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014201}
14202
14203void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14204{
14205 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014206 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014207}
14208
14209static void init_txe(struct hfi1_devdata *dd)
14210{
14211 int i;
14212
14213 /* enable all PIO, SDMA, general, and Egress errors */
14214 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14215 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14216 write_csr(dd, SEND_ERR_MASK, ~0ull);
14217 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14218
14219 /* enable all per-context and per-SDMA engine errors */
14220 for (i = 0; i < dd->chip_send_contexts; i++)
14221 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14222 for (i = 0; i < dd->chip_sdma_engines; i++)
14223 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14224
14225 /* set the local CU to AU mapping */
14226 assign_local_cm_au_table(dd, dd->vcu);
14227
14228 /*
14229 * Set reasonable default for Credit Return Timer
14230 * Don't set on Simulator - causes it to choke.
14231 */
14232 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14233 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14234}
14235
14236int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14237{
14238 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14239 unsigned sctxt;
14240 int ret = 0;
14241 u64 reg;
14242
14243 if (!rcd || !rcd->sc) {
14244 ret = -EINVAL;
14245 goto done;
14246 }
14247 sctxt = rcd->sc->hw_context;
14248 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14249 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14250 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14251 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14252 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14253 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14254 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14255 /*
14256 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014257 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014258 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014259 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14260 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14261 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14262 }
14263
14264 /* Enable J_KEY check on receive context. */
14265 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14266 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14267 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14268 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14269done:
14270 return ret;
14271}
14272
14273int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14274{
14275 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14276 unsigned sctxt;
14277 int ret = 0;
14278 u64 reg;
14279
14280 if (!rcd || !rcd->sc) {
14281 ret = -EINVAL;
14282 goto done;
14283 }
14284 sctxt = rcd->sc->hw_context;
14285 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14286 /*
14287 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14288 * This check would not have been enabled for A0 h/w, see
14289 * set_ctxt_jkey().
14290 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014291 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014292 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14293 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14294 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14295 }
14296 /* Turn off the J_KEY on the receive side */
14297 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14298done:
14299 return ret;
14300}
14301
14302int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14303{
14304 struct hfi1_ctxtdata *rcd;
14305 unsigned sctxt;
14306 int ret = 0;
14307 u64 reg;
14308
Jubin Johne4909742016-02-14 20:22:00 -080014309 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014310 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014311 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014312 ret = -EINVAL;
14313 goto done;
14314 }
14315 if (!rcd || !rcd->sc) {
14316 ret = -EINVAL;
14317 goto done;
14318 }
14319 sctxt = rcd->sc->hw_context;
14320 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14321 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14322 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14323 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14324 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014325 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014326 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14327done:
14328 return ret;
14329}
14330
14331int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14332{
14333 struct hfi1_ctxtdata *rcd;
14334 unsigned sctxt;
14335 int ret = 0;
14336 u64 reg;
14337
Jubin Johne4909742016-02-14 20:22:00 -080014338 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014339 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014340 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014341 ret = -EINVAL;
14342 goto done;
14343 }
14344 if (!rcd || !rcd->sc) {
14345 ret = -EINVAL;
14346 goto done;
14347 }
14348 sctxt = rcd->sc->hw_context;
14349 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14350 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14351 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14352 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14353done:
14354 return ret;
14355}
14356
14357/*
14358 * Start the clean up of the chip. Our clean up happens in multiple
14359 * stages and this is just the first.
14360 */
14361void hfi1_start_cleanup(struct hfi1_devdata *dd)
14362{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014363 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014364 free_cntrs(dd);
14365 free_rcverr(dd);
14366 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014367 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014368}
14369
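/* both HFIs on an ASIC share a base GUID, differing only in the HFI index bit */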
14370#define HFI_BASE_GUID(dev) \
14371 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14372
14373/*
Dean Luick78eb1292016-03-05 08:49:45 -080014374 * Information can be shared between the two HFIs on the same ASIC
14375 * in the same OS. This function finds the peer device and sets
14376 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014377 */
Dean Luick78eb1292016-03-05 08:49:45 -080014378static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014379{
14380 unsigned long flags;
14381 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014382 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014383 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014384
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014385 /* pre-allocate the asic structure in case we are the first device */
14386 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14387 if (!asic_data)
14388 return -ENOMEM;
14389
Mike Marciniszyn77241052015-07-30 15:17:43 -040014390 spin_lock_irqsave(&hfi1_devs_lock, flags);
14391 /* Find our peer device */
14392 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14393 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14394 dd->unit != tmp->unit) {
14395 peer = tmp;
14396 break;
14397 }
14398 }
14399
Dean Luick78eb1292016-03-05 08:49:45 -080014400 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014401 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014402 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014403 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014404 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014405 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014406 mutex_init(&dd->asic_data->asic_resource_mutex);
14407 }
14408 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014409 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014410
14411 /* first one through - set up i2c devices */
14412 if (!peer)
14413 ret = set_up_i2c(dd, dd->asic_data);
14414
Dean Luick78eb1292016-03-05 08:49:45 -080014415 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014416}
14417
Dean Luick5d9157a2015-11-16 21:59:34 -050014418/*
14419 * Set dd->boardname. Use a generic name if a name is not returned from
14420 * EFI variable space.
14421 *
14422 * Return 0 on success, -ENOMEM if space could not be allocated.
14423 */
14424static int obtain_boardname(struct hfi1_devdata *dd)
14425{
14426 /* generic board description */
14427 const char generic[] =
14428 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14429 unsigned long size;
14430 int ret;
14431
14432 ret = read_hfi1_efi_var(dd, "description", &size,
14433 (void **)&dd->boardname);
14434 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014435 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014436 /* use generic description */
14437 dd->boardname = kstrdup(generic, GFP_KERNEL);
14438 if (!dd->boardname)
14439 return -ENOMEM;
14440 }
14441 return 0;
14442}
14443
Kaike Wan24487dd2016-02-26 13:33:23 -080014444/*
14445 * Check the interrupt registers to make sure that they are mapped correctly.
14446 * It is intended to help the user identify any mismapping by the VMM when the driver
14447 * is running in a VM. This function should only be called before interrupts
14448 * are set up properly.
14449 *
14450 * Return 0 on success, -EINVAL on failure.
14451 */
14452static int check_int_registers(struct hfi1_devdata *dd)
14453{
14454 u64 reg;
14455 u64 all_bits = ~(u64)0;
14456 u64 mask;
14457
14458 /* Clear CceIntMask[0] to avoid raising any interrupts */
14459 mask = read_csr(dd, CCE_INT_MASK);
14460 write_csr(dd, CCE_INT_MASK, 0ull);
14461 reg = read_csr(dd, CCE_INT_MASK);
14462 if (reg)
14463 goto err_exit;
14464
14465 /* Clear all interrupt status bits */
14466 write_csr(dd, CCE_INT_CLEAR, all_bits);
14467 reg = read_csr(dd, CCE_INT_STATUS);
14468 if (reg)
14469 goto err_exit;
14470
14471 /* Set all interrupt status bits */
14472 write_csr(dd, CCE_INT_FORCE, all_bits);
14473 reg = read_csr(dd, CCE_INT_STATUS);
14474 if (reg != all_bits)
14475 goto err_exit;
14476
14477 /* Restore the interrupt mask */
14478 write_csr(dd, CCE_INT_CLEAR, all_bits);
14479 write_csr(dd, CCE_INT_MASK, mask);
14480
14481 return 0;
14482err_exit:
14483 write_csr(dd, CCE_INT_MASK, mask);
14484 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14485 return -EINVAL;
14486}
14487
Mike Marciniszyn77241052015-07-30 15:17:43 -040014488/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014489 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014490 * @dev: the pci_dev for hfi1_ib device
14491 * @ent: pci_device_id struct for this dev
14492 *
14493 * Also allocates, initializes, and returns the devdata struct for this
14494 * device instance
14495 *
14496 * This is global, and is called directly at init to set up the
14497 * chip-specific function pointers for later use.
14498 */
14499struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14500 const struct pci_device_id *ent)
14501{
14502 struct hfi1_devdata *dd;
14503 struct hfi1_pportdata *ppd;
14504 u64 reg;
14505 int i, ret;
14506 static const char * const inames[] = { /* implementation names */
14507 "RTL silicon",
14508 "RTL VCS simulation",
14509 "RTL FPGA emulation",
14510 "Functional simulator"
14511 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014512 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014513
Jubin John17fb4f22016-02-14 20:21:52 -080014514 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14515 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014516 if (IS_ERR(dd))
14517 goto bail;
14518 ppd = dd->pport;
14519 for (i = 0; i < dd->num_pports; i++, ppd++) {
14520 int vl;
14521 /* init common fields */
14522 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14523 /* DC supports 4 link widths */
14524 ppd->link_width_supported =
14525 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14526 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14527 ppd->link_width_downgrade_supported =
14528 ppd->link_width_supported;
14529 /* start out enabling only 4X */
14530 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14531 ppd->link_width_downgrade_enabled =
14532 ppd->link_width_downgrade_supported;
14533 /* link width active is 0 when link is down */
14534 /* link width downgrade active is 0 when link is down */
14535
Jubin Johnd0d236e2016-02-14 20:20:15 -080014536 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14537 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014538 hfi1_early_err(&pdev->dev,
14539 "Invalid num_vls %u, using %u VLs\n",
14540 num_vls, HFI1_MAX_VLS_SUPPORTED);
14541 num_vls = HFI1_MAX_VLS_SUPPORTED;
14542 }
14543 ppd->vls_supported = num_vls;
14544 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014545 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014546 /* Set the default MTU. */
14547 for (vl = 0; vl < num_vls; vl++)
14548 dd->vld[vl].mtu = hfi1_max_mtu;
14549 dd->vld[15].mtu = MAX_MAD_PACKET;
14550 /*
14551 * Set the initial values to reasonable defaults, will be set
14552 * for real when link is up.
14553 */
14554 ppd->lstate = IB_PORT_DOWN;
14555 ppd->overrun_threshold = 0x4;
14556 ppd->phy_error_threshold = 0xf;
14557 ppd->port_crc_mode_enabled = link_crc_mask;
14558 /* initialize supported LTP CRC mode */
14559 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14560 /* initialize enabled LTP CRC mode */
14561 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14562 /* start in offline */
14563 ppd->host_link_state = HLS_DN_OFFLINE;
14564 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014565 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014566 }
14567
14568 dd->link_default = HLS_DN_POLL;
14569
14570 /*
14571 * Do remaining PCIe setup and save PCIe values in dd.
14572 * Any error printing is already done by the init code.
14573 * On return, we have the chip mapped.
14574 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014575 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014576 if (ret < 0)
14577 goto bail_free;
14578
14579 /* verify that reads actually work, save revision for reset check */
14580 dd->revision = read_csr(dd, CCE_REVISION);
14581 if (dd->revision == ~(u64)0) {
14582 dd_dev_err(dd, "cannot read chip CSRs\n");
14583 ret = -EINVAL;
14584 goto bail_cleanup;
14585 }
14586 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14587 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14588 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14589 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14590
Jubin John4d114fd2016-02-14 20:21:43 -080014591 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014592 * Check interrupt registers mapping if the driver has no access to
14593 * the upstream component. In this case, it is likely that the driver
14594 * is running in a VM.
14595 */
14596 if (!parent) {
14597 ret = check_int_registers(dd);
14598 if (ret)
14599 goto bail_cleanup;
14600 }
14601
14602 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014603 * obtain the hardware ID - NOT related to unit, which is a
14604 * software enumeration
14605 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014606 reg = read_csr(dd, CCE_REVISION2);
14607 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14608 & CCE_REVISION2_HFI_ID_MASK;
14609 /* the variable size will remove unwanted bits */
14610 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14611 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14612 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014613 dd->icode < ARRAY_SIZE(inames) ?
14614 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014615
14616 /* speeds the hardware can support */
14617 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14618 /* speeds allowed to run at */
14619 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14620 /* give a reasonable active value, will be set on link up */
14621 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14622
14623 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14624 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14625 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14626 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14627 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14628 /* fix up link widths for emulation _p */
14629 ppd = dd->pport;
14630 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14631 ppd->link_width_supported =
14632 ppd->link_width_enabled =
14633 ppd->link_width_downgrade_supported =
14634 ppd->link_width_downgrade_enabled =
14635 OPA_LINK_WIDTH_1X;
14636 }
14637 /* ensure num_vls isn't larger than the number of sdma engines */
14638 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14639 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014640 num_vls, dd->chip_sdma_engines);
14641 num_vls = dd->chip_sdma_engines;
14642 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014643 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014644 }
14645
14646 /*
14647 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14648 * Limit the max if larger than the field holds. If timeout is
14649 * non-zero, then the calculated field will be at least 1.
14650 *
14651 * Must be after icode is set up - the cclock rate depends
14652 * on knowing the hardware being used.
14653 */
14654 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14655 if (dd->rcv_intr_timeout_csr >
14656 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14657 dd->rcv_intr_timeout_csr =
14658 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14659 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14660 dd->rcv_intr_timeout_csr = 1;
14661
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014662 /* needs to be done before we look for the peer device */
14663 read_guid(dd);
14664
Dean Luick78eb1292016-03-05 08:49:45 -080014665 /* set up shared ASIC data with peer device */
14666 ret = init_asic_data(dd);
14667 if (ret)
14668 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014669
Mike Marciniszyn77241052015-07-30 15:17:43 -040014670 /* obtain chip sizes, reset chip CSRs */
14671 init_chip(dd);
14672
14673 /* read in the PCIe link speed information */
14674 ret = pcie_speeds(dd);
14675 if (ret)
14676 goto bail_cleanup;
14677
Dean Luicke83eba22016-09-30 04:41:45 -070014678 /* call before get_platform_config(), after init_chip_resources() */
14679 ret = eprom_init(dd);
14680 if (ret)
14681 goto bail_free_rcverr;
14682
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014683 /* Needs to be called before hfi1_firmware_init */
14684 get_platform_config(dd);
14685
Mike Marciniszyn77241052015-07-30 15:17:43 -040014686 /* read in firmware */
14687 ret = hfi1_firmware_init(dd);
14688 if (ret)
14689 goto bail_cleanup;
14690
14691 /*
14692 * In general, the PCIe Gen3 transition must occur after the
14693 * chip has been idled (so it won't initiate any PCIe transactions
14694 * e.g. an interrupt) and before the driver changes any registers
14695 * (the transition will reset the registers).
14696 *
14697 * In particular, place this call after:
14698 * - init_chip() - the chip will not initiate any PCIe transactions
14699 * - pcie_speeds() - reads the current link speed
14700 * - hfi1_firmware_init() - the needed firmware is ready to be
14701 * downloaded
14702 */
14703 ret = do_pcie_gen3_transition(dd);
14704 if (ret)
14705 goto bail_cleanup;
14706
14707 /* start setting dd values and adjusting CSRs */
14708 init_early_variables(dd);
14709
14710 parse_platform_config(dd);
14711
Dean Luick5d9157a2015-11-16 21:59:34 -050014712 ret = obtain_boardname(dd);
14713 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014714 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014715
14716 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014717 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014718 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014719 (u32)dd->majrev,
14720 (u32)dd->minrev,
14721 (dd->revision >> CCE_REVISION_SW_SHIFT)
14722 & CCE_REVISION_SW_MASK);
14723
14724 ret = set_up_context_variables(dd);
14725 if (ret)
14726 goto bail_cleanup;
14727
14728 /* set initial RXE CSRs */
14729 init_rxe(dd);
14730 /* set initial TXE CSRs */
14731 init_txe(dd);
14732 /* set initial non-RXE, non-TXE CSRs */
14733 init_other(dd);
14734 /* set up KDETH QP prefix in both RX and TX CSRs */
14735 init_kdeth_qp(dd);
14736
Dennis Dalessandro41973442016-07-25 07:52:36 -070014737 ret = hfi1_dev_affinity_init(dd);
14738 if (ret)
14739 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080014740
Mike Marciniszyn77241052015-07-30 15:17:43 -040014741 /* send contexts must be set up before receive contexts */
14742 ret = init_send_contexts(dd);
14743 if (ret)
14744 goto bail_cleanup;
14745
14746 ret = hfi1_create_ctxts(dd);
14747 if (ret)
14748 goto bail_cleanup;
14749
14750 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14751 /*
14752 * rcd[0] is guaranteed to be valid by this point. Also, all
14753 * contexts are using the same value, as per the module parameter.
14754 */
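	/* sizeof(u64) / sizeof(u32) == 2: the 64-bit RHF sits in the last two dwords of each entry */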
14755 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14756
14757 ret = init_pervl_scs(dd);
14758 if (ret)
14759 goto bail_cleanup;
14760
14761 /* sdma init */
14762 for (i = 0; i < dd->num_pports; ++i) {
14763 ret = sdma_init(dd, i);
14764 if (ret)
14765 goto bail_cleanup;
14766 }
14767
14768 /* use contexts created by hfi1_create_ctxts */
14769 ret = set_up_interrupts(dd);
14770 if (ret)
14771 goto bail_cleanup;
14772
14773 /* set up LCB access - must be after set_up_interrupts() */
14774 init_lcb_access(dd);
14775
Ira Weinyfc0b76c2016-07-27 21:09:40 -040014776 /*
14777 * Serial number is created from the base guid:
14778 * [27:24] = base guid [38:35]
14779 * [23: 0] = base guid [23: 0]
14780 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014781 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040014782 (dd->base_guid & 0xFFFFFF) |
14783 ((dd->base_guid >> 11) & 0xF000000));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014784
14785 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14786 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14787 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14788
14789 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14790 if (ret)
14791 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014792
14793 thermal_init(dd);
14794
14795 ret = init_cntrs(dd);
14796 if (ret)
14797 goto bail_clear_intr;
14798
14799 ret = init_rcverr(dd);
14800 if (ret)
14801 goto bail_free_cntrs;
14802
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070014803 init_completion(&dd->user_comp);
14804
14805 /* The user refcount starts with one to indicate an active device */
14806 atomic_set(&dd->user_refcount, 1);
14807
Mike Marciniszyn77241052015-07-30 15:17:43 -040014808 goto bail;
14809
14810bail_free_rcverr:
14811 free_rcverr(dd);
14812bail_free_cntrs:
14813 free_cntrs(dd);
14814bail_clear_intr:
14815 clean_up_interrupts(dd);
14816bail_cleanup:
14817 hfi1_pcie_ddcleanup(dd);
14818bail_free:
14819 hfi1_free_devdata(dd);
14820 dd = ERR_PTR(ret);
14821bail:
14822 return dd;
14823}
14824
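/*
 * Compute the additional egress delay, in static rate control cycles, needed
 * to slow a dw_len dword packet from the port's current egress rate down to
 * the desired rate. Returns 0 when no extra delay is needed.
 */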
14825static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14826 u32 dw_len)
14827{
14828 u32 delta_cycles;
14829 u32 current_egress_rate = ppd->current_egress_rate;
14830 /* rates here are in units of 10^6 bits/sec */
14831
14832 if (desired_egress_rate == -1)
14833 return 0; /* shouldn't happen */
14834
14835 if (desired_egress_rate >= current_egress_rate)
14836 return 0; /* we can't help go faster, only slower */
14837
14838 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14839 egress_cycles(dw_len * 4, current_egress_rate);
14840
14841 return (u16)delta_cycles;
14842}
14843
Mike Marciniszyn77241052015-07-30 15:17:43 -040014844/**
14845 * create_pbc - build a pbc for transmission
14846 * @flags: special case flags or-ed in built pbc
14847 * @srate: static rate
14848 * @vl: vl
14849 * @dwlen: dword length (header words + data words + pbc words)
14850 *
14851 * Create a PBC with the given flags, rate, VL, and length.
14852 *
14853 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14854 * for verbs, which does not use this PSM feature. The lone other caller
14855 * is for the diagnostic interface which calls this if the user does not
14856 * supply their own PBC.
14857 */
14858u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14859 u32 dw_len)
14860{
14861 u64 pbc, delay = 0;
14862
14863 if (unlikely(srate_mbs))
14864 delay = delay_cycles(ppd, srate_mbs, dw_len);
14865
14866 pbc = flags
14867 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14868 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14869 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14870 | (dw_len & PBC_LENGTH_DWS_MASK)
14871 << PBC_LENGTH_DWS_SHIFT;
14872
14873 return pbc;
14874}
14875
14876#define SBUS_THERMAL 0x4f
14877#define SBUS_THERM_MONITOR_MODE 0x1
14878
14879#define THERM_FAILURE(dev, ret, reason) \
14880 dd_dev_err((dd), \
14881 "Thermal sensor initialization failed: %s (%d)\n", \
14882 (reason), (ret))
14883
14884/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014885 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014886 *
14887 * After initialization, enable polling of thermal sensor through
14888 * SBus interface. In order for this to work, the SBus Master
14889 * firmware has to be loaded due to the fact that the HW polling
14890 * logic uses SBus interrupts, which are not supported with
14891 * default firmware. Otherwise, no data will be returned through
14892 * the ASIC_STS_THERM CSR.
14893 */
14894static int thermal_init(struct hfi1_devdata *dd)
14895{
14896 int ret = 0;
14897
14898 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014899 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014900 return ret;
14901
Dean Luick576531f2016-03-05 08:50:01 -080014902 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14903 if (ret) {
14904 THERM_FAILURE(dd, ret, "Acquire SBus");
14905 return ret;
14906 }
14907
Mike Marciniszyn77241052015-07-30 15:17:43 -040014908 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014909 /* Disable polling of thermal readings */
14910 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14911 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014912 /* Thermal Sensor Initialization */
14913 /* Step 1: Reset the Thermal SBus Receiver */
14914 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14915 RESET_SBUS_RECEIVER, 0);
14916 if (ret) {
14917 THERM_FAILURE(dd, ret, "Bus Reset");
14918 goto done;
14919 }
14920 /* Step 2: Set Reset bit in Thermal block */
14921 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14922 WRITE_SBUS_RECEIVER, 0x1);
14923 if (ret) {
14924 THERM_FAILURE(dd, ret, "Therm Block Reset");
14925 goto done;
14926 }
14927 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14928 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14929 WRITE_SBUS_RECEIVER, 0x32);
14930 if (ret) {
14931 THERM_FAILURE(dd, ret, "Write Clock Div");
14932 goto done;
14933 }
14934 /* Step 4: Select temperature mode */
14935 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14936 WRITE_SBUS_RECEIVER,
14937 SBUS_THERM_MONITOR_MODE);
14938 if (ret) {
14939 THERM_FAILURE(dd, ret, "Write Mode Sel");
14940 goto done;
14941 }
14942 /* Step 5: De-assert block reset and start conversion */
14943 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14944 WRITE_SBUS_RECEIVER, 0x2);
14945 if (ret) {
14946 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14947 goto done;
14948 }
14949 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14950 msleep(22);
14951
14952 /* Enable polling of thermal readings */
14953 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014954
14955 /* Set initialized flag */
14956 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14957 if (ret)
14958 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14959
Mike Marciniszyn77241052015-07-30 15:17:43 -040014960done:
Dean Luick576531f2016-03-05 08:50:01 -080014961 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014962 return ret;
14963}
14964
14965static void handle_temp_err(struct hfi1_devdata *dd)
14966{
14967 struct hfi1_pportdata *ppd = &dd->pport[0];
14968 /*
14969 * Thermal Critical Interrupt
14970 * Put the device into forced freeze mode, take link down to
14971 * offline, and put DC into reset.
14972 */
14973 dd_dev_emerg(dd,
14974 "Critical temperature reached! Forcing device into freeze mode!\n");
14975 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014976 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014977 /*
14978 * Shut DC down as much and as quickly as possible.
14979 *
14980 * Step 1: Take the link down to OFFLINE. This will cause the
14981 * 8051 to put the Serdes in reset. However, we don't want to
14982 * go through the entire link state machine since we want to
14983 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14984 * but rather an attempt to save the chip.
14985 * Code below is almost the same as quiet_serdes() but avoids
14986 * all the extra work and the sleeps.
14987 */
14988 ppd->driver_link_ready = 0;
14989 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014990 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14991 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014992 /*
14993 * Step 2: Shutdown LCB and 8051
14994 * After shutdown, do not restore DC_CFG_RESET value.
14995 */
14996 dc_shutdown(dd);
14997}