1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48/*
49 * This file contains all of the code that is specific to the HFI chip
50 */
51
52#include <linux/pci.h>
53#include <linux/delay.h>
54#include <linux/interrupt.h>
55#include <linux/module.h>
56
57#include "hfi.h"
58#include "trace.h"
59#include "mad.h"
60#include "pio.h"
61#include "sdma.h"
62#include "eprom.h"
63#include "efivar.h"
64#include "platform.h"
65#include "aspm.h"
66#include "affinity.h"
67
68#define NUM_IB_PORTS 1
69
70uint kdeth_qp;
71module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75module_param(num_vls, uint, S_IRUGO);
76MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78/*
79 * Default time to aggregate two 10K packets from the idle state
80 * (timer not running). The timer starts at the end of the first packet,
81 * so only the time for one 10K packet and header plus a bit extra is needed.
82 * 10 * 1024 + 64 header bytes = 10304 bytes
83 * 10304 bytes / 12.5 GB/s = 824.32 ns
84 */
85uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86module_param(rcv_intr_timeout, uint, S_IRUGO);
87MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89uint rcv_intr_count = 16; /* same as qib */
90module_param(rcv_intr_count, uint, S_IRUGO);
91MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93ushort link_crc_mask = SUPPORTED_CRCS;
94module_param(link_crc_mask, ushort, S_IRUGO);
95MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97uint loopback;
98module_param_named(loopback, loopback, uint, S_IRUGO);
99MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101/* Other driver tunables */
102uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103static ushort crc_14b_sideband = 1;
104static uint use_flr = 1;
105uint quick_linkup; /* skip LNI */
106
107struct flag_table {
108 u64 flag; /* the flag */
109 char *str; /* description string */
110 u16 extra; /* extra information */
111 u16 unused0;
112 u32 unused1;
113};
114
115/* str must be a string constant */
116#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117#define FLAG_ENTRY0(str, flag) {flag, str, 0}
118
119/* Send Error Consequences */
120#define SEC_WRITE_DROPPED 0x1
121#define SEC_PACKET_DROPPED 0x2
122#define SEC_SC_HALTED 0x4 /* per-context only */
123#define SEC_SPC_FREEZE 0x8 /* per-HFI only */
124
125#define DEFAULT_KRCVQS 2
126#define MIN_KERNEL_KCTXTS 2
127#define FIRST_KERNEL_KCTXT 1
128/* sizes for both the QP and RSM map tables */
129#define NUM_MAP_ENTRIES 256
130#define NUM_MAP_REGS 32
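/*
 * Note (assumed from the ratio above): each 64-bit map register packs
 * eight 8-bit entries, so the 256-entry table spans 256 / 8 = 32 registers.
 */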
131
132/* Bit offset into the GUID which carries HFI id information */
133#define GUID_HFI_INDEX_SHIFT 39
134
135/* extract the emulation revision */
136#define emulator_rev(dd) ((dd)->irev >> 8)
137/* parallel and serial emulation versions are 3 and 4 respectively */
138#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
139#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
140
141/* RSM fields */
142
143/* packet type */
144#define IB_PACKET_TYPE 2ull
145#define QW_SHIFT 6ull
146/* QPN[7..1] */
147#define QPN_WIDTH 7ull
148
149/* LRH.BTH: QW 0, OFFSET 48 - for match */
150#define LRH_BTH_QW 0ull
151#define LRH_BTH_BIT_OFFSET 48ull
152#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
153#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
154#define LRH_BTH_SELECT
155#define LRH_BTH_MASK 3ull
156#define LRH_BTH_VALUE 2ull
157
158/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
159#define LRH_SC_QW 0ull
160#define LRH_SC_BIT_OFFSET 56ull
161#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
162#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
163#define LRH_SC_MASK 128ull
164#define LRH_SC_VALUE 0ull
165
166/* SC[n..0] QW 0, OFFSET 60 - for select */
167#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))
168
169/* QPN[m+n:1] QW 1, OFFSET 1 */
170#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
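/*
 * For illustration, the offsets above encode (QW index << QW_SHIFT) | bit
 * offset within that QW, so:
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */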
171
172/* defines to build power on SC2VL table */
173#define SC2VL_VAL( \
174 num, \
175 sc0, sc0val, \
176 sc1, sc1val, \
177 sc2, sc2val, \
178 sc3, sc3val, \
179 sc4, sc4val, \
180 sc5, sc5val, \
181 sc6, sc6val, \
182 sc7, sc7val) \
183( \
184 ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
185 ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
186 ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
187 ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
188 ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
189 ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
190 ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
191 ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
192)
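/*
 * Illustrative use only (values are hypothetical): a call such as
 *   SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 * ORs together the per-SC VL values, each shifted to its SendSC2VLT0
 * field, producing one 64-bit CSR value mapping SC0..SC7 to VL0..VL7.
 */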
193
194#define DC_SC_VL_VAL( \
195 range, \
196 e0, e0val, \
197 e1, e1val, \
198 e2, e2val, \
199 e3, e3val, \
200 e4, e4val, \
201 e5, e5val, \
202 e6, e6val, \
203 e7, e7val, \
204 e8, e8val, \
205 e9, e9val, \
206 e10, e10val, \
207 e11, e11val, \
208 e12, e12val, \
209 e13, e13val, \
210 e14, e14val, \
211 e15, e15val) \
212( \
213 ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
214 ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
215 ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
216 ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
217 ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
218 ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
219 ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
220 ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
221 ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
222 ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
223 ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
224 ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
225 ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
226 ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
227 ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
228 ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
229)
230
231/* all CceStatus sub-block freeze bits */
232#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
233 | CCE_STATUS_RXE_FROZE_SMASK \
234 | CCE_STATUS_TXE_FROZE_SMASK \
235 | CCE_STATUS_TXE_PIO_FROZE_SMASK)
236/* all CceStatus sub-block TXE pause bits */
237#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
238 | CCE_STATUS_TXE_PAUSED_SMASK \
239 | CCE_STATUS_SDMA_PAUSED_SMASK)
240/* all CceStatus sub-block RXE pause bits */
241#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
242
243#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
244#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
245
246/*
247 * CCE Error flags.
248 */
249static struct flag_table cce_err_status_flags[] = {
250/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
251 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
252/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
253 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
254/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
255 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
256/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
257 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
258/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
259 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
260/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
261 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
262/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
263 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
264/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
265 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
266/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
267 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
268/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
269 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
270/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
271 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
272/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
273 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
274/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
275 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
276/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
277 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
278/*14*/ FLAG_ENTRY0("PcicRetrySotMemCorErr",
279 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
280/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
281 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
282/*16*/ FLAG_ENTRY0("PcicPostDatQCorErr",
283 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
284/*17*/ FLAG_ENTRY0("PcicCplHdQCorErr",
285 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
286/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
287 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
288/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
289 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
290/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
291 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
292/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
293 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
294/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
295 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
296/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
297 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
298/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
299 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
300/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
301 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
302/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
303 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
304/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
305 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
306/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
307 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
308/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
309 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
310/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
311 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
312/*31*/ FLAG_ENTRY0("LATriggered",
313 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
314/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
315 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
316/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
317 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
318/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
319 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
320/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
321 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
322/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
323 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
324/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
325 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
326/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
327 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
328/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
329 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
330/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
331 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
332/*41-63 reserved*/
333};
334
335/*
336 * Misc Error flags
337 */
338#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
339static struct flag_table misc_err_status_flags[] = {
340/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
341/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
342/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
343/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
344/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
345/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
346/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
347/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
348/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
349/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
350/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
351/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
352/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
353};
354
355/*
356 * TXE PIO Error flags and consequences
357 */
358static struct flag_table pio_err_status_flags[] = {
359/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
360 SEC_WRITE_DROPPED,
361 SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
362/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
363 SEC_SPC_FREEZE,
364 SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
365/* 2*/ FLAG_ENTRY("PioCsrParity",
366 SEC_SPC_FREEZE,
367 SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
368/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
369 SEC_SPC_FREEZE,
370 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
371/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
372 SEC_SPC_FREEZE,
373 SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
374/* 5*/ FLAG_ENTRY("PioPccFifoParity",
375 SEC_SPC_FREEZE,
376 SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
377/* 6*/ FLAG_ENTRY("PioPecFifoParity",
378 SEC_SPC_FREEZE,
379 SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
380/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
381 SEC_SPC_FREEZE,
382 SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
383/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
384 SEC_SPC_FREEZE,
385 SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
386/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
387 SEC_SPC_FREEZE,
388 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
389/*10*/ FLAG_ENTRY("PioSmPktResetParity",
390 SEC_SPC_FREEZE,
391 SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
392/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
393 SEC_SPC_FREEZE,
394 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
395/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
396 SEC_SPC_FREEZE,
397 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
398/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
399 0,
400 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
401/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
402 0,
403 SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
404/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
405 SEC_SPC_FREEZE,
406 SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
407/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
408 SEC_SPC_FREEZE,
409 SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
410/*17*/ FLAG_ENTRY("PioInitSmIn",
411 0,
412 SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
413/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
414 SEC_SPC_FREEZE,
415 SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
416/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
417 SEC_SPC_FREEZE,
418 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
419/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
420 0,
421 SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
422/*21*/ FLAG_ENTRY("PioWriteDataParity",
423 SEC_SPC_FREEZE,
424 SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
425/*22*/ FLAG_ENTRY("PioStateMachine",
426 SEC_SPC_FREEZE,
427 SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
428/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
429 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
430 SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
431/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
432 SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
433 SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
434/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
435 SEC_SPC_FREEZE,
436 SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
437/*26*/ FLAG_ENTRY("PioVlfSopParity",
438 SEC_SPC_FREEZE,
439 SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
440/*27*/ FLAG_ENTRY("PioVlFifoParity",
441 SEC_SPC_FREEZE,
442 SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
443/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
444 SEC_SPC_FREEZE,
445 SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
446/*29*/ FLAG_ENTRY("PioPpmcSopLen",
447 SEC_SPC_FREEZE,
448 SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
449/*30-31 reserved*/
450/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
451 SEC_SPC_FREEZE,
452 SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
453/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
454 SEC_SPC_FREEZE,
455 SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
456/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
457 SEC_SPC_FREEZE,
458 SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
459/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
460 SEC_SPC_FREEZE,
461 SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
462/*36-63 reserved*/
463};
464
465/* TXE PIO errors that cause an SPC freeze */
466#define ALL_PIO_FREEZE_ERR \
467 (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
468 | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
469 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
470 | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
471 | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
472 | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
473 | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
474 | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
475 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
476 | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
477 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
478 | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
479 | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
480 | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
481 | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
482 | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
483 | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
484 | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
485 | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
486 | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
487 | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
488 | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
489 | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
490 | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
491 | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
492 | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
493 | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
494 | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
495 | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
496
497/*
498 * TXE SDMA Error flags
499 */
500static struct flag_table sdma_err_status_flags[] = {
501/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
502 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
503/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
504 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
505/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
506 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
507/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
508 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
509/*04-63 reserved*/
510};
511
512/* TXE SDMA errors that cause an SPC freeze */
513#define ALL_SDMA_FREEZE_ERR \
514 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
515 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
516 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
517
518/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
519#define PORT_DISCARD_EGRESS_ERRS \
520 (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
521 | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
522 | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
523
524/*
525 * TXE Egress Error flags
526 */
527#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
528static struct flag_table egress_err_status_flags[] = {
529/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
530/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
531/* 2 reserved */
532/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
533 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
534/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
535/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
536/* 6 reserved */
537/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
538 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
539/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
540 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
541/* 9-10 reserved */
542/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
543 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
544/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
545/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
546/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
547/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
548/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
549 SEES(TX_SDMA0_DISALLOWED_PACKET)),
550/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
551 SEES(TX_SDMA1_DISALLOWED_PACKET)),
552/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
553 SEES(TX_SDMA2_DISALLOWED_PACKET)),
554/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
555 SEES(TX_SDMA3_DISALLOWED_PACKET)),
556/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
557 SEES(TX_SDMA4_DISALLOWED_PACKET)),
558/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
559 SEES(TX_SDMA5_DISALLOWED_PACKET)),
560/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
561 SEES(TX_SDMA6_DISALLOWED_PACKET)),
562/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
563 SEES(TX_SDMA7_DISALLOWED_PACKET)),
564/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
565 SEES(TX_SDMA8_DISALLOWED_PACKET)),
566/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
567 SEES(TX_SDMA9_DISALLOWED_PACKET)),
568/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
569 SEES(TX_SDMA10_DISALLOWED_PACKET)),
570/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
571 SEES(TX_SDMA11_DISALLOWED_PACKET)),
572/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
573 SEES(TX_SDMA12_DISALLOWED_PACKET)),
574/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
575 SEES(TX_SDMA13_DISALLOWED_PACKET)),
576/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
577 SEES(TX_SDMA14_DISALLOWED_PACKET)),
578/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
579 SEES(TX_SDMA15_DISALLOWED_PACKET)),
580/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
581 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
582/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
583 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
584/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
585 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
586/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
587 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
588/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
589 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
590/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
591 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
592/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
593 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
594/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
595 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
596/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
597 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
598/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
599/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
600/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
601/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
602/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
603/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
604/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
605/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
606/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
607/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
608/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
609/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
610/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
611/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
612/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
613/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
614/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
615/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
616/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
617/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
618/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
619/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
620 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
621/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
622 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
623};
624
625/*
626 * TXE Egress Error Info flags
627 */
628#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
629static struct flag_table egress_err_info_flags[] = {
630/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
631/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
632/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
633/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
634/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
635/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
636/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
637/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
638/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
639/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
640/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
641/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
642/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
643/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
644/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
645/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
646/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
647/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
648/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
649/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
650/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
651/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
652};
653
654/* TXE Egress errors that cause an SPC freeze */
655#define ALL_TXE_EGRESS_FREEZE_ERR \
656 (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
657 | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
658 | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
659 | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
660 | SEES(TX_LAUNCH_CSR_PARITY) \
661 | SEES(TX_SBRD_CTL_CSR_PARITY) \
662 | SEES(TX_CONFIG_PARITY) \
663 | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
664 | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
665 | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
666 | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
667 | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
668 | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
669 | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
670 | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
671 | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
672 | SEES(TX_CREDIT_RETURN_PARITY))
673
674/*
675 * TXE Send error flags
676 */
677#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
678static struct flag_table send_err_status_flags[] = {
679/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
680/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
681/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
682};
683
684/*
685 * TXE Send Context Error flags and consequences
686 */
687static struct flag_table sc_err_status_flags[] = {
688/* 0*/ FLAG_ENTRY("InconsistentSop",
689 SEC_PACKET_DROPPED | SEC_SC_HALTED,
690 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
691/* 1*/ FLAG_ENTRY("DisallowedPacket",
692 SEC_PACKET_DROPPED | SEC_SC_HALTED,
693 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
694/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
695 SEC_WRITE_DROPPED | SEC_SC_HALTED,
696 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
697/* 3*/ FLAG_ENTRY("WriteOverflow",
698 SEC_WRITE_DROPPED | SEC_SC_HALTED,
699 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
700/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
701 SEC_WRITE_DROPPED | SEC_SC_HALTED,
702 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
703/* 5-63 reserved*/
704};
705
706/*
707 * RXE Receive Error flags
708 */
709#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
710static struct flag_table rxe_err_status_flags[] = {
711/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
712/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
713/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
714/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
715/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
716/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
717/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
718/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
719/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
720/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
721/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
722/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
723/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
724/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
725/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
726/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
727/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
728 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
729/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
730/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
731/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
732 RXES(RBUF_BLOCK_LIST_READ_UNC)),
733/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
734 RXES(RBUF_BLOCK_LIST_READ_COR)),
735/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
736 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
737/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
738 RXES(RBUF_CSR_QENT_CNT_PARITY)),
739/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
740 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
741/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
742 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
743/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
744/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
745/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
746 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
747/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
748/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
749/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
750/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
751/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
752/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
753/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
754/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
755 RXES(RBUF_FL_INITDONE_PARITY)),
756/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
757 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
758/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
759/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
760/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
761/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
762 RXES(LOOKUP_DES_PART1_UNC_COR)),
763/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
764 RXES(LOOKUP_DES_PART2_PARITY)),
765/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
766/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
767/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
768/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
769/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
770/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
771/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
772/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
773/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
774/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
775/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
776/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
777/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
778/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
779/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
780/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
781/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
782/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
783/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
784/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
785/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
786/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
787};
788
789/* RXE errors that will trigger an SPC freeze */
790#define ALL_RXE_FREEZE_ERR \
791 (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
792 | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
793 | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
794 | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
795 | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
796 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
797 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
798 | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
799 | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
800 | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
801 | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
802 | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
803 | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
804 | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
805 | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
806 | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
807 | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
808 | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
809 | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
810 | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
811 | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
812 | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
813 | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
814 | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
815 | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
816 | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
817 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
818 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
819 | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
820 | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
821 | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
822 | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
823 | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
824 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
825 | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
826 | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
827 | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
828 | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
829 | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
830 | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
831 | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
832 | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
833 | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
834 | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
835
836#define RXE_FREEZE_ABORT_MASK \
837 (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
838 RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
839 RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
840
841/*
842 * DCC Error Flags
843 */
844#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
845static struct flag_table dcc_err_flags[] = {
846 FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
847 FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
848 FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
849 FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
850 FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
851 FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
852 FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
853 FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
854 FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
855 FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
856 FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
857 FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
858 FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
859 FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
860 FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
861 FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
862 FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
863 FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
864 FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
865 FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
866 FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
867 FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
868 FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
869 FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
870 FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
871 FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
872 FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
873 FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
874 FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
875 FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
876 FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
877 FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
878 FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
879 FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
880 FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
881 FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
882 FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
883 FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
884 FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
885 FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
886 FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
887 FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
888 FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
889 FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
890 FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
891 FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
892};
893
894/*
895 * LCB error flags
896 */
897#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
898static struct flag_table lcb_err_flags[] = {
899/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
900/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
901/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
902/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
903 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
904/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
905/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
906/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
907/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
908/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
909/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
910/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
911/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
912/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
913/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
914 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
915/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
916/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
917/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
918/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
919/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
920/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
921 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
922/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
923/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
924/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
925/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
926/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
927/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
928/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
929 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
930/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
931/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
932 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
933/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
934 LCBE(REDUNDANT_FLIT_PARITY_ERR))
935};
936
937/*
938 * DC8051 Error Flags
939 */
940#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
941static struct flag_table dc8051_err_flags[] = {
942 FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
943 FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
944 FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
945 FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
946 FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
947 FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
948 FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
949 FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
950 FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
951 D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
952 FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
953};
954
955/*
956 * DC8051 Information Error flags
957 *
958 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
959 */
960static struct flag_table dc8051_info_err_flags[] = {
961 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
962 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
963 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
964 FLAG_ENTRY0("Serdes internal loopback failure",
965 FAILED_SERDES_INTERNAL_LOOPBACK),
966 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
967 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
968 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
969 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
970 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
971 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
972 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
973 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
974 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
975 FLAG_ENTRY0("External Device Request Timeout",
976 EXTERNAL_DEVICE_REQ_TIMEOUT),
977};
978
979/*
980 * DC8051 Information Host Information flags
981 *
982 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
983 */
984static struct flag_table dc8051_info_host_msg_flags[] = {
985 FLAG_ENTRY0("Host request done", 0x0001),
986 FLAG_ENTRY0("BC SMA message", 0x0002),
987 FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
988 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
989 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
990 FLAG_ENTRY0("External device config request", 0x0020),
991 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
992 FLAG_ENTRY0("LinkUp achieved", 0x0080),
993 FLAG_ENTRY0("Link going down", 0x0100),
994};
995
996static u32 encoded_size(u32 size);
997static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
998static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
999static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1000 u8 *continuous);
1001static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1002 u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1003static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1004 u8 *remote_tx_rate, u16 *link_widths);
1005static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
1006 u8 *flag_bits, u16 *link_widths);
1007static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1008 u8 *device_rev);
1009static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1010static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1011static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1012 u8 *tx_polarity_inversion,
1013 u8 *rx_polarity_inversion, u8 *max_rate);
1014static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1015 unsigned int context, u64 err_status);
1016static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1017static void handle_dcc_err(struct hfi1_devdata *dd,
1018 unsigned int context, u64 err_status);
1019static void handle_lcb_err(struct hfi1_devdata *dd,
1020 unsigned int context, u64 err_status);
1021static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1022static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1023static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1024static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1025static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1026static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1027static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1028static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1029static void set_partition_keys(struct hfi1_pportdata *);
1030static const char *link_state_name(u32 state);
1031static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1032 u32 state);
1033static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1034 u64 *out_data);
1035static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1036static int thermal_init(struct hfi1_devdata *dd);
1037
1038static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1039 int msecs);
1040static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1041static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1042static void handle_temp_err(struct hfi1_devdata *);
1043static void dc_shutdown(struct hfi1_devdata *);
1044static void dc_start(struct hfi1_devdata *);
1045static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1046 unsigned int *np);
1047static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1048
1049/*
1050 * Error interrupt table entry. This is used as input to the interrupt
1051 * "clear down" routine used for all second tier error interrupt registers.
1052 * Second tier interrupt registers have a single bit representing them
1053 * in the top-level CceIntStatus.
1054 */
1055struct err_reg_info {
1056 u32 status; /* status CSR offset */
1057 u32 clear; /* clear CSR offset */
1058 u32 mask; /* mask CSR offset */
1059 void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1060 const char *desc;
1061};
1062
1063#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1064#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1065#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1066
1067/*
1068 * Helpers for building HFI and DC error interrupt table entries. Different
1069 * helpers are needed because of inconsistent register names.
1070 */
1071#define EE(reg, handler, desc) \
1072 { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1073 handler, desc }
1074#define DC_EE1(reg, handler, desc) \
1075 { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1076#define DC_EE2(reg, handler, desc) \
1077 { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
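/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * filling an err_reg_info entry with the three CSR offsets plus handler.
 */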
1078
1079/*
1080 * Table of the "misc" grouping of error interrupts. Each entry refers to
1081 * another register containing more information.
1082 */
1083static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1084/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
1085/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
1086/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
1087/* 3*/ { 0, 0, 0, NULL }, /* reserved */
1088/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
1089/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
1090/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1091/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
1092 /* the rest are reserved */
1093};
1094
1095/*
1096 * Index into the Various section of the interrupt sources
1097 * corresponding to the Critical Temperature interrupt.
1098 */
1099#define TCRIT_INT_SOURCE 4
1100
1101/*
1102 * SDMA error interrupt entry - refers to another register containing more
1103 * information.
1104 */
1105static const struct err_reg_info sdma_eng_err =
1106 EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1107
1108static const struct err_reg_info various_err[NUM_VARIOUS] = {
1109/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
1110/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
1111/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
1112/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
1113/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
1114 /* rest are reserved */
1115};
1116
1117/*
1118 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1119 * register can not be derived from the MTU value because 10K is not
1120 * a power of 2. Therefore, we need a constant. Everything else can
1121 * be calculated.
1122 */
1123#define DCC_CFG_PORT_MTU_CAP_10240 7
1124
1125/*
1126 * Table of the DC grouping of error interrupts. Each entry refers to
1127 * another register containing more information.
1128 */
1129static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1130/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
1131/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
1132/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
1133/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
1134 /* the rest are reserved */
1135};
1136
1137struct cntr_entry {
1138 /*
1139 * counter name
1140 */
1141 char *name;
1142
1143 /*
1144 * csr to read for name (if applicable)
1145 */
1146 u64 csr;
1147
1148 /*
1149 * offset into dd or ppd to store the counter's value
1150 */
1151 int offset;
1152
1153 /*
1154 * flags
1155 */
1156 u8 flags;
1157
1158 /*
1159 * accessor for stat element, context either dd or ppd
1160 */
1161 u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1162 int mode, u64 data);
1163};
1164
1165#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1166#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1167
1168#define CNTR_ELEM(name, csr, offset, flags, accessor) \
1169{ \
1170 name, \
1171 csr, \
1172 offset, \
1173 flags, \
1174 accessor \
1175}
1176
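/*
 * In the helpers below, "counter * 8" converts a counter index into a
 * byte offset (each counter occupies one 64-bit CSR slot), which is then
 * added to the base of the relevant counter array.
 */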
1177/* 32bit RXE */
1178#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1179CNTR_ELEM(#name, \
1180 (counter * 8 + RCV_COUNTER_ARRAY32), \
1181 0, flags | CNTR_32BIT, \
1182 port_access_u32_csr)
1183
1184#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1185CNTR_ELEM(#name, \
1186 (counter * 8 + RCV_COUNTER_ARRAY32), \
1187 0, flags | CNTR_32BIT, \
1188 dev_access_u32_csr)
1189
1190/* 64bit RXE */
1191#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1192CNTR_ELEM(#name, \
1193 (counter * 8 + RCV_COUNTER_ARRAY64), \
1194 0, flags, \
1195 port_access_u64_csr)
1196
1197#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1198CNTR_ELEM(#name, \
1199 (counter * 8 + RCV_COUNTER_ARRAY64), \
1200 0, flags, \
1201 dev_access_u64_csr)
1202
1203#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1204#define OVR_ELM(ctx) \
1205CNTR_ELEM("RcvHdrOvr" #ctx, \
1206 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1207 0, CNTR_NORMAL, port_access_u64_csr)
1208
1209/* 32bit TXE */
1210#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1211CNTR_ELEM(#name, \
1212 (counter * 8 + SEND_COUNTER_ARRAY32), \
1213 0, flags | CNTR_32BIT, \
1214 port_access_u32_csr)
1215
1216/* 64bit TXE */
1217#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1218CNTR_ELEM(#name, \
1219 (counter * 8 + SEND_COUNTER_ARRAY64), \
1220 0, flags, \
1221 port_access_u64_csr)
1222
1223# define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1224CNTR_ELEM(#name,\
1225 counter * 8 + SEND_COUNTER_ARRAY64, \
1226 0, \
1227 flags, \
1228 dev_access_u64_csr)
1229
1230/* CCE */
1231#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1232CNTR_ELEM(#name, \
1233 (counter * 8 + CCE_COUNTER_ARRAY32), \
1234 0, flags | CNTR_32BIT, \
1235 dev_access_u32_csr)
1236
1237#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1238CNTR_ELEM(#name, \
1239 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1240 0, flags | CNTR_32BIT, \
1241 dev_access_u32_csr)
1242
1243/* DC */
1244#define DC_PERF_CNTR(name, counter, flags) \
1245CNTR_ELEM(#name, \
1246 counter, \
1247 0, \
1248 flags, \
1249 dev_access_u64_csr)
1250
1251#define DC_PERF_CNTR_LCB(name, counter, flags) \
1252CNTR_ELEM(#name, \
1253 counter, \
1254 0, \
1255 flags, \
1256 dc_access_lcb_cntr)
1257
1258/* ibp counters */
1259#define SW_IBP_CNTR(name, cntr) \
1260CNTR_ELEM(#name, \
1261 0, \
1262 0, \
1263 CNTR_SYNTH, \
1264 access_ibp_##cntr)
1265
1266u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1267{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001268 if (dd->flags & HFI1_PRESENT) {
Bhaktipriya Shridhar6d210ee2016-02-25 17:22:11 +05301269 return readq((void __iomem *)dd->kregbase + offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001270 }
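	/* chip not present/mapped: return all ones (-1 cast to u64) */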
1271 return -1;
1272}
1273
1274void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1275{
1276 if (dd->flags & HFI1_PRESENT)
1277 writeq(value, (void __iomem *)dd->kregbase + offset);
1278}
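/*
 * Illustrative sketch, not part of the driver: using the CSR accessors
 * above to program the MTU cap field of DCC_CFG_PORT_CONFIG with the 10K
 * constant defined earlier.  The _SMASK/_SHIFT field names are assumptions
 * made for this example only.
 */
static inline void example_set_mtu_cap_10k(struct hfi1_devdata *dd)
{
	u64 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);

	reg &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;	/* assumed field mask */
	reg |= (u64)DCC_CFG_PORT_MTU_CAP_10240
			<< DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT; /* assumed shift */
	write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
}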
1279
1280void __iomem *get_csr_addr(
1281 struct hfi1_devdata *dd,
1282 u32 offset)
1283{
1284 return (void __iomem *)dd->kregbase + offset;
1285}
1286
1287static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1288 int mode, u64 value)
1289{
1290 u64 ret;
1291
Mike Marciniszyn77241052015-07-30 15:17:43 -04001292 if (mode == CNTR_MODE_R) {
1293 ret = read_csr(dd, csr);
1294 } else if (mode == CNTR_MODE_W) {
1295 write_csr(dd, csr, value);
1296 ret = value;
1297 } else {
1298 dd_dev_err(dd, "Invalid cntr register access mode");
1299 return 0;
1300 }
1301
1302 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1303 return ret;
1304}
1305
1306/* Dev Access */
1307static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001308 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001309{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301310 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001311 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001312
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001313 if (entry->flags & CNTR_SDMA) {
1314 if (vl == CNTR_INVALID_VL)
1315 return 0;
1316 csr += 0x100 * vl;
1317 } else {
1318 if (vl != CNTR_INVALID_VL)
1319 return 0;
1320 }
1321 return read_write_csr(dd, csr, mode, data);
1322}
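/*
 * Illustrative note, not part of the driver: for an entry flagged
 * CNTR_SDMA the "vl" argument above is assumed to carry the SDMA engine
 * index, and each engine's copy of the counter sits at a 0x100-byte
 * stride.  With a made-up base of 0x5000, engine 3 would be read at
 * 0x5000 + 0x100 * 3 = 0x5300.
 */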
1323
1324static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1325 void *context, int idx, int mode, u64 data)
1326{
1327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1328
1329 if (dd->per_sdma && idx < dd->num_sdma)
1330 return dd->per_sdma[idx].err_cnt;
1331 return 0;
1332}
1333
1334static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1335 void *context, int idx, int mode, u64 data)
1336{
1337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1338
1339 if (dd->per_sdma && idx < dd->num_sdma)
1340 return dd->per_sdma[idx].sdma_int_cnt;
1341 return 0;
1342}
1343
1344static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1345 void *context, int idx, int mode, u64 data)
1346{
1347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348
1349 if (dd->per_sdma && idx < dd->num_sdma)
1350 return dd->per_sdma[idx].idle_int_cnt;
1351 return 0;
1352}
1353
1354static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1355 void *context, int idx, int mode,
1356 u64 data)
1357{
1358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1359
1360 if (dd->per_sdma && idx < dd->num_sdma)
1361 return dd->per_sdma[idx].progress_int_cnt;
1362 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001363}
1364
1365static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001366 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001367{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301368 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001369
1370 u64 val = 0;
1371 u64 csr = entry->csr;
1372
1373 if (entry->flags & CNTR_VL) {
1374 if (vl == CNTR_INVALID_VL)
1375 return 0;
1376 csr += 8 * vl;
1377 } else {
1378 if (vl != CNTR_INVALID_VL)
1379 return 0;
1380 }
1381
1382 val = read_write_csr(dd, csr, mode, data);
1383 return val;
1384}
1385
1386static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001387 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001388{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301389 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001390 u32 csr = entry->csr;
1391 int ret = 0;
1392
1393 if (vl != CNTR_INVALID_VL)
1394 return 0;
1395 if (mode == CNTR_MODE_R)
1396 ret = read_lcb_csr(dd, csr, &data);
1397 else if (mode == CNTR_MODE_W)
1398 ret = write_lcb_csr(dd, csr, data);
1399
1400 if (ret) {
1401 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1402 return 0;
1403 }
1404
1405 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1406 return data;
1407}
1408
1409/* Port Access */
1410static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001411 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001412{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301413 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001414
1415 if (vl != CNTR_INVALID_VL)
1416 return 0;
1417 return read_write_csr(ppd->dd, entry->csr, mode, data);
1418}
1419
1420static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001421 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001422{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301423 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001424 u64 val;
1425 u64 csr = entry->csr;
1426
1427 if (entry->flags & CNTR_VL) {
1428 if (vl == CNTR_INVALID_VL)
1429 return 0;
1430 csr += 8 * vl;
1431 } else {
1432 if (vl != CNTR_INVALID_VL)
1433 return 0;
1434 }
1435 val = read_write_csr(ppd->dd, csr, mode, data);
1436 return val;
1437}
1438
1439/* Software defined */
1440static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1441 u64 data)
1442{
1443 u64 ret;
1444
1445 if (mode == CNTR_MODE_R) {
1446 ret = *cntr;
1447 } else if (mode == CNTR_MODE_W) {
1448 *cntr = data;
1449 ret = data;
1450 } else {
1451 dd_dev_err(dd, "Invalid cntr sw access mode");
1452 return 0;
1453 }
1454
1455 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1456
1457 return ret;
1458}
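/*
 * Illustrative sketch, not part of the driver: zeroing a software port
 * counter through the write path of read_write_sw().  The helper name is
 * hypothetical.
 */
static inline void example_zero_link_downed(struct hfi1_pportdata *ppd)
{
	read_write_sw(ppd->dd, &ppd->link_downed, CNTR_MODE_W, 0);
}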
1459
1460static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001461 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001462{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301463 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001464
1465 if (vl != CNTR_INVALID_VL)
1466 return 0;
1467 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1468}
1469
1470static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001471 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001472{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301473 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001474
1475 if (vl != CNTR_INVALID_VL)
1476 return 0;
1477 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1478}
1479
Dean Luick6d014532015-12-01 15:38:23 -05001480static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1481 void *context, int vl, int mode,
1482 u64 data)
1483{
1484 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1485
1486 if (vl != CNTR_INVALID_VL)
1487 return 0;
1488 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1489}
1490
Mike Marciniszyn77241052015-07-30 15:17:43 -04001491static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001492 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001493{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001494 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1495 u64 zero = 0;
1496 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001497
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001498 if (vl == CNTR_INVALID_VL)
1499 counter = &ppd->port_xmit_discards;
1500 else if (vl >= 0 && vl < C_VL_COUNT)
1501 counter = &ppd->port_xmit_discards_vl[vl];
1502 else
1503 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001504
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001505 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001506}
1507
1508static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001509 void *context, int vl, int mode,
1510 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001511{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301512 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001513
1514 if (vl != CNTR_INVALID_VL)
1515 return 0;
1516
1517 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1518 mode, data);
1519}
1520
1521static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001522 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001523{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301524 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001525
1526 if (vl != CNTR_INVALID_VL)
1527 return 0;
1528
1529 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1530 mode, data);
1531}
1532
1533u64 get_all_cpu_total(u64 __percpu *cntr)
1534{
1535 int cpu;
1536 u64 counter = 0;
1537
1538 for_each_possible_cpu(cpu)
1539 counter += *per_cpu_ptr(cntr, cpu);
1540 return counter;
1541}
1542
1543static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1544 u64 __percpu *cntr,
1545 int vl, int mode, u64 data)
1546{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001547 u64 ret = 0;
1548
1549 if (vl != CNTR_INVALID_VL)
1550 return 0;
1551
1552 if (mode == CNTR_MODE_R) {
1553 ret = get_all_cpu_total(cntr) - *z_val;
1554 } else if (mode == CNTR_MODE_W) {
1555 /* A write can only zero the counter */
1556 if (data == 0)
1557 *z_val = get_all_cpu_total(cntr);
1558 else
1559 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1560 } else {
1561 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1562 return 0;
1563 }
1564
1565 return ret;
1566}
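/*
 * Illustrative example, not part of the driver, with made-up numbers: if
 * the per-CPU total is 1000 when the counter is "zeroed" (a write of 0),
 * z_val is latched at 1000; a later read taken when the total has grown
 * to 1250 reports 1250 - 1000 = 250.  Writing any non-zero value is
 * rejected above.
 */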
1567
1568static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1569 void *context, int vl, int mode, u64 data)
1570{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301571 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001572
1573 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1574 mode, data);
1575}
1576
1577static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001578 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001579{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301580 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001581
1582 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1583 mode, data);
1584}
1585
1586static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1587 void *context, int vl, int mode, u64 data)
1588{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301589 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001590
1591 return dd->verbs_dev.n_piowait;
1592}
1593
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001594static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1595 void *context, int vl, int mode, u64 data)
1596{
1597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1598
1599 return dd->verbs_dev.n_piodrain;
1600}
1601
Mike Marciniszyn77241052015-07-30 15:17:43 -04001602static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1603 void *context, int vl, int mode, u64 data)
1604{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301605 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001606
1607 return dd->verbs_dev.n_txwait;
1608}
1609
1610static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1611 void *context, int vl, int mode, u64 data)
1612{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301613 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001614
1615 return dd->verbs_dev.n_kmem_wait;
1616}
1617
Dean Luickb4219222015-10-26 10:28:35 -04001618static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001619 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001620{
1621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1622
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001623 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1624 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001625}
1626
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001627/* Software counters for the error status bits within MISC_ERR_STATUS */
1628static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1629 void *context, int vl, int mode,
1630 u64 data)
1631{
1632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1633
1634 return dd->misc_err_status_cnt[12];
1635}
1636
1637static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1638 void *context, int vl, int mode,
1639 u64 data)
1640{
1641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1642
1643 return dd->misc_err_status_cnt[11];
1644}
1645
1646static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1647 void *context, int vl, int mode,
1648 u64 data)
1649{
1650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1651
1652 return dd->misc_err_status_cnt[10];
1653}
1654
1655static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1656 void *context, int vl,
1657 int mode, u64 data)
1658{
1659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1660
1661 return dd->misc_err_status_cnt[9];
1662}
1663
1664static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1665 void *context, int vl, int mode,
1666 u64 data)
1667{
1668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1669
1670 return dd->misc_err_status_cnt[8];
1671}
1672
1673static u64 access_misc_efuse_read_bad_addr_err_cnt(
1674 const struct cntr_entry *entry,
1675 void *context, int vl, int mode, u64 data)
1676{
1677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1678
1679 return dd->misc_err_status_cnt[7];
1680}
1681
1682static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1683 void *context, int vl,
1684 int mode, u64 data)
1685{
1686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1687
1688 return dd->misc_err_status_cnt[6];
1689}
1690
1691static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1692 void *context, int vl, int mode,
1693 u64 data)
1694{
1695 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1696
1697 return dd->misc_err_status_cnt[5];
1698}
1699
1700static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1701 void *context, int vl, int mode,
1702 u64 data)
1703{
1704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1705
1706 return dd->misc_err_status_cnt[4];
1707}
1708
1709static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1710 void *context, int vl,
1711 int mode, u64 data)
1712{
1713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1714
1715 return dd->misc_err_status_cnt[3];
1716}
1717
1718static u64 access_misc_csr_write_bad_addr_err_cnt(
1719 const struct cntr_entry *entry,
1720 void *context, int vl, int mode, u64 data)
1721{
1722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1723
1724 return dd->misc_err_status_cnt[2];
1725}
1726
1727static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1728 void *context, int vl,
1729 int mode, u64 data)
1730{
1731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1732
1733 return dd->misc_err_status_cnt[1];
1734}
1735
1736static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1737 void *context, int vl, int mode,
1738 u64 data)
1739{
1740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1741
1742 return dd->misc_err_status_cnt[0];
1743}
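/*
 * Illustrative sketch, not part of the driver: how the misc_err_status_cnt[]
 * slots read back by the accessors above are assumed to be maintained, with
 * one count per MISC_ERR_STATUS bit and "reg" holding the status value at
 * interrupt time.  The loop bound is a hypothetical name.
 */
static inline void example_count_misc_errors(struct hfi1_devdata *dd, u64 reg)
{
	int i;

	for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++)	/* assumed */
		if (reg & BIT_ULL(i))
			dd->misc_err_status_cnt[i]++;
}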
1744
1745/*
1746 * Software counter for the aggregate of
1747 * individual CceErrStatus counters
1748 */
1749static u64 access_sw_cce_err_status_aggregated_cnt(
1750 const struct cntr_entry *entry,
1751 void *context, int vl, int mode, u64 data)
1752{
1753 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1754
1755 return dd->sw_cce_err_status_aggregate;
1756}
1757
1758/*
1759 * Software counters corresponding to each of the
1760 * error status bits within CceErrStatus
1761 */
1762static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1763 void *context, int vl, int mode,
1764 u64 data)
1765{
1766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767
1768 return dd->cce_err_status_cnt[40];
1769}
1770
1771static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1772 void *context, int vl, int mode,
1773 u64 data)
1774{
1775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776
1777 return dd->cce_err_status_cnt[39];
1778}
1779
1780static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1781 void *context, int vl, int mode,
1782 u64 data)
1783{
1784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785
1786 return dd->cce_err_status_cnt[38];
1787}
1788
1789static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1790 void *context, int vl, int mode,
1791 u64 data)
1792{
1793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794
1795 return dd->cce_err_status_cnt[37];
1796}
1797
1798static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1799 void *context, int vl, int mode,
1800 u64 data)
1801{
1802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803
1804 return dd->cce_err_status_cnt[36];
1805}
1806
1807static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1808 const struct cntr_entry *entry,
1809 void *context, int vl, int mode, u64 data)
1810{
1811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812
1813 return dd->cce_err_status_cnt[35];
1814}
1815
1816static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1817 const struct cntr_entry *entry,
1818 void *context, int vl, int mode, u64 data)
1819{
1820 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1821
1822 return dd->cce_err_status_cnt[34];
1823}
1824
1825static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1826 void *context, int vl,
1827 int mode, u64 data)
1828{
1829 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1830
1831 return dd->cce_err_status_cnt[33];
1832}
1833
1834static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1835 void *context, int vl, int mode,
1836 u64 data)
1837{
1838 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1839
1840 return dd->cce_err_status_cnt[32];
1841}
1842
1843static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1844 void *context, int vl, int mode, u64 data)
1845{
1846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847
1848 return dd->cce_err_status_cnt[31];
1849}
1850
1851static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1852 void *context, int vl, int mode,
1853 u64 data)
1854{
1855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856
1857 return dd->cce_err_status_cnt[30];
1858}
1859
1860static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1861 void *context, int vl, int mode,
1862 u64 data)
1863{
1864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865
1866 return dd->cce_err_status_cnt[29];
1867}
1868
1869static u64 access_pcic_transmit_back_parity_err_cnt(
1870 const struct cntr_entry *entry,
1871 void *context, int vl, int mode, u64 data)
1872{
1873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874
1875 return dd->cce_err_status_cnt[28];
1876}
1877
1878static u64 access_pcic_transmit_front_parity_err_cnt(
1879 const struct cntr_entry *entry,
1880 void *context, int vl, int mode, u64 data)
1881{
1882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883
1884 return dd->cce_err_status_cnt[27];
1885}
1886
1887static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1888 void *context, int vl, int mode,
1889 u64 data)
1890{
1891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892
1893 return dd->cce_err_status_cnt[26];
1894}
1895
1896static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1897 void *context, int vl, int mode,
1898 u64 data)
1899{
1900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901
1902 return dd->cce_err_status_cnt[25];
1903}
1904
1905static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1906 void *context, int vl, int mode,
1907 u64 data)
1908{
1909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910
1911 return dd->cce_err_status_cnt[24];
1912}
1913
1914static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1915 void *context, int vl, int mode,
1916 u64 data)
1917{
1918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1919
1920 return dd->cce_err_status_cnt[23];
1921}
1922
1923static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1924 void *context, int vl,
1925 int mode, u64 data)
1926{
1927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1928
1929 return dd->cce_err_status_cnt[22];
1930}
1931
1932static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1933 void *context, int vl, int mode,
1934 u64 data)
1935{
1936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1937
1938 return dd->cce_err_status_cnt[21];
1939}
1940
1941static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1942 const struct cntr_entry *entry,
1943 void *context, int vl, int mode, u64 data)
1944{
1945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1946
1947 return dd->cce_err_status_cnt[20];
1948}
1949
1950static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1951 void *context, int vl,
1952 int mode, u64 data)
1953{
1954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1955
1956 return dd->cce_err_status_cnt[19];
1957}
1958
1959static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1960 void *context, int vl, int mode,
1961 u64 data)
1962{
1963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1964
1965 return dd->cce_err_status_cnt[18];
1966}
1967
1968static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1969 void *context, int vl, int mode,
1970 u64 data)
1971{
1972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1973
1974 return dd->cce_err_status_cnt[17];
1975}
1976
1977static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1978 void *context, int vl, int mode,
1979 u64 data)
1980{
1981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1982
1983 return dd->cce_err_status_cnt[16];
1984}
1985
1986static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1987 void *context, int vl, int mode,
1988 u64 data)
1989{
1990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1991
1992 return dd->cce_err_status_cnt[15];
1993}
1994
1995static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1996 void *context, int vl,
1997 int mode, u64 data)
1998{
1999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2000
2001 return dd->cce_err_status_cnt[14];
2002}
2003
2004static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2005 void *context, int vl, int mode,
2006 u64 data)
2007{
2008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2009
2010 return dd->cce_err_status_cnt[13];
2011}
2012
2013static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2014 const struct cntr_entry *entry,
2015 void *context, int vl, int mode, u64 data)
2016{
2017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2018
2019 return dd->cce_err_status_cnt[12];
2020}
2021
2022static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2023 const struct cntr_entry *entry,
2024 void *context, int vl, int mode, u64 data)
2025{
2026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2027
2028 return dd->cce_err_status_cnt[11];
2029}
2030
2031static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2032 const struct cntr_entry *entry,
2033 void *context, int vl, int mode, u64 data)
2034{
2035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2036
2037 return dd->cce_err_status_cnt[10];
2038}
2039
2040static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2041 const struct cntr_entry *entry,
2042 void *context, int vl, int mode, u64 data)
2043{
2044 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2045
2046 return dd->cce_err_status_cnt[9];
2047}
2048
2049static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2050 const struct cntr_entry *entry,
2051 void *context, int vl, int mode, u64 data)
2052{
2053 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2054
2055 return dd->cce_err_status_cnt[8];
2056}
2057
2058static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2059 void *context, int vl,
2060 int mode, u64 data)
2061{
2062 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2063
2064 return dd->cce_err_status_cnt[7];
2065}
2066
2067static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2068 const struct cntr_entry *entry,
2069 void *context, int vl, int mode, u64 data)
2070{
2071 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2072
2073 return dd->cce_err_status_cnt[6];
2074}
2075
2076static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2077 void *context, int vl, int mode,
2078 u64 data)
2079{
2080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2081
2082 return dd->cce_err_status_cnt[5];
2083}
2084
2085static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2086 void *context, int vl, int mode,
2087 u64 data)
2088{
2089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2090
2091 return dd->cce_err_status_cnt[4];
2092}
2093
2094static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2095 const struct cntr_entry *entry,
2096 void *context, int vl, int mode, u64 data)
2097{
2098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2099
2100 return dd->cce_err_status_cnt[3];
2101}
2102
2103static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2104 void *context, int vl,
2105 int mode, u64 data)
2106{
2107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2108
2109 return dd->cce_err_status_cnt[2];
2110}
2111
2112static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2113 void *context, int vl,
2114 int mode, u64 data)
2115{
2116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2117
2118 return dd->cce_err_status_cnt[1];
2119}
2120
2121static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2122 void *context, int vl, int mode,
2123 u64 data)
2124{
2125 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2126
2127 return dd->cce_err_status_cnt[0];
2128}
2129
2130/*
2131 * Software counters corresponding to each of the
2132 * error status bits within RcvErrStatus
2133 */
2134static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2135 void *context, int vl, int mode,
2136 u64 data)
2137{
2138 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2139
2140 return dd->rcv_err_status_cnt[63];
2141}
2142
2143static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2144 void *context, int vl,
2145 int mode, u64 data)
2146{
2147 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2148
2149 return dd->rcv_err_status_cnt[62];
2150}
2151
2152static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2153 void *context, int vl, int mode,
2154 u64 data)
2155{
2156 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2157
2158 return dd->rcv_err_status_cnt[61];
2159}
2160
2161static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2162 void *context, int vl, int mode,
2163 u64 data)
2164{
2165 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2166
2167 return dd->rcv_err_status_cnt[60];
2168}
2169
2170static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2171 void *context, int vl,
2172 int mode, u64 data)
2173{
2174 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2175
2176 return dd->rcv_err_status_cnt[59];
2177}
2178
2179static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2180 void *context, int vl,
2181 int mode, u64 data)
2182{
2183 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2184
2185 return dd->rcv_err_status_cnt[58];
2186}
2187
2188static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2189 void *context, int vl, int mode,
2190 u64 data)
2191{
2192 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2193
2194 return dd->rcv_err_status_cnt[57];
2195}
2196
2197static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2198 void *context, int vl, int mode,
2199 u64 data)
2200{
2201 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2202
2203 return dd->rcv_err_status_cnt[56];
2204}
2205
2206static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2207 void *context, int vl, int mode,
2208 u64 data)
2209{
2210 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2211
2212 return dd->rcv_err_status_cnt[55];
2213}
2214
2215static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2216 const struct cntr_entry *entry,
2217 void *context, int vl, int mode, u64 data)
2218{
2219 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2220
2221 return dd->rcv_err_status_cnt[54];
2222}
2223
2224static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2225 const struct cntr_entry *entry,
2226 void *context, int vl, int mode, u64 data)
2227{
2228 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2229
2230 return dd->rcv_err_status_cnt[53];
2231}
2232
2233static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2234 void *context, int vl,
2235 int mode, u64 data)
2236{
2237 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2238
2239 return dd->rcv_err_status_cnt[52];
2240}
2241
2242static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2243 void *context, int vl,
2244 int mode, u64 data)
2245{
2246 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2247
2248 return dd->rcv_err_status_cnt[51];
2249}
2250
2251static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2252 void *context, int vl,
2253 int mode, u64 data)
2254{
2255 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2256
2257 return dd->rcv_err_status_cnt[50];
2258}
2259
2260static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2261 void *context, int vl,
2262 int mode, u64 data)
2263{
2264 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2265
2266 return dd->rcv_err_status_cnt[49];
2267}
2268
2269static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2270 void *context, int vl,
2271 int mode, u64 data)
2272{
2273 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2274
2275 return dd->rcv_err_status_cnt[48];
2276}
2277
2278static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2279 void *context, int vl,
2280 int mode, u64 data)
2281{
2282 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2283
2284 return dd->rcv_err_status_cnt[47];
2285}
2286
2287static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2288 void *context, int vl, int mode,
2289 u64 data)
2290{
2291 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2292
2293 return dd->rcv_err_status_cnt[46];
2294}
2295
2296static u64 access_rx_hq_intr_csr_parity_err_cnt(
2297 const struct cntr_entry *entry,
2298 void *context, int vl, int mode, u64 data)
2299{
2300 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2301
2302 return dd->rcv_err_status_cnt[45];
2303}
2304
2305static u64 access_rx_lookup_csr_parity_err_cnt(
2306 const struct cntr_entry *entry,
2307 void *context, int vl, int mode, u64 data)
2308{
2309 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2310
2311 return dd->rcv_err_status_cnt[44];
2312}
2313
2314static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2315 const struct cntr_entry *entry,
2316 void *context, int vl, int mode, u64 data)
2317{
2318 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2319
2320 return dd->rcv_err_status_cnt[43];
2321}
2322
2323static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2324 const struct cntr_entry *entry,
2325 void *context, int vl, int mode, u64 data)
2326{
2327 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2328
2329 return dd->rcv_err_status_cnt[42];
2330}
2331
2332static u64 access_rx_lookup_des_part2_parity_err_cnt(
2333 const struct cntr_entry *entry,
2334 void *context, int vl, int mode, u64 data)
2335{
2336 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2337
2338 return dd->rcv_err_status_cnt[41];
2339}
2340
2341static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2342 const struct cntr_entry *entry,
2343 void *context, int vl, int mode, u64 data)
2344{
2345 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2346
2347 return dd->rcv_err_status_cnt[40];
2348}
2349
2350static u64 access_rx_lookup_des_part1_unc_err_cnt(
2351 const struct cntr_entry *entry,
2352 void *context, int vl, int mode, u64 data)
2353{
2354 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2355
2356 return dd->rcv_err_status_cnt[39];
2357}
2358
2359static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2360 const struct cntr_entry *entry,
2361 void *context, int vl, int mode, u64 data)
2362{
2363 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2364
2365 return dd->rcv_err_status_cnt[38];
2366}
2367
2368static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2369 const struct cntr_entry *entry,
2370 void *context, int vl, int mode, u64 data)
2371{
2372 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2373
2374 return dd->rcv_err_status_cnt[37];
2375}
2376
2377static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2378 const struct cntr_entry *entry,
2379 void *context, int vl, int mode, u64 data)
2380{
2381 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2382
2383 return dd->rcv_err_status_cnt[36];
2384}
2385
2386static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2387 const struct cntr_entry *entry,
2388 void *context, int vl, int mode, u64 data)
2389{
2390 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2391
2392 return dd->rcv_err_status_cnt[35];
2393}
2394
2395static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2396 const struct cntr_entry *entry,
2397 void *context, int vl, int mode, u64 data)
2398{
2399 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2400
2401 return dd->rcv_err_status_cnt[34];
2402}
2403
2404static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2405 const struct cntr_entry *entry,
2406 void *context, int vl, int mode, u64 data)
2407{
2408 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2409
2410 return dd->rcv_err_status_cnt[33];
2411}
2412
2413static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2414 void *context, int vl, int mode,
2415 u64 data)
2416{
2417 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2418
2419 return dd->rcv_err_status_cnt[32];
2420}
2421
2422static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2423 void *context, int vl, int mode,
2424 u64 data)
2425{
2426 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2427
2428 return dd->rcv_err_status_cnt[31];
2429}
2430
2431static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2432 void *context, int vl, int mode,
2433 u64 data)
2434{
2435 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2436
2437 return dd->rcv_err_status_cnt[30];
2438}
2439
2440static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2441 void *context, int vl, int mode,
2442 u64 data)
2443{
2444 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2445
2446 return dd->rcv_err_status_cnt[29];
2447}
2448
2449static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2450 void *context, int vl,
2451 int mode, u64 data)
2452{
2453 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2454
2455 return dd->rcv_err_status_cnt[28];
2456}
2457
2458static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2459 const struct cntr_entry *entry,
2460 void *context, int vl, int mode, u64 data)
2461{
2462 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2463
2464 return dd->rcv_err_status_cnt[27];
2465}
2466
2467static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2468 const struct cntr_entry *entry,
2469 void *context, int vl, int mode, u64 data)
2470{
2471 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2472
2473 return dd->rcv_err_status_cnt[26];
2474}
2475
2476static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2477 const struct cntr_entry *entry,
2478 void *context, int vl, int mode, u64 data)
2479{
2480 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2481
2482 return dd->rcv_err_status_cnt[25];
2483}
2484
2485static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2486 const struct cntr_entry *entry,
2487 void *context, int vl, int mode, u64 data)
2488{
2489 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2490
2491 return dd->rcv_err_status_cnt[24];
2492}
2493
2494static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2495 const struct cntr_entry *entry,
2496 void *context, int vl, int mode, u64 data)
2497{
2498 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2499
2500 return dd->rcv_err_status_cnt[23];
2501}
2502
2503static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2504 const struct cntr_entry *entry,
2505 void *context, int vl, int mode, u64 data)
2506{
2507 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2508
2509 return dd->rcv_err_status_cnt[22];
2510}
2511
2512static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2513 const struct cntr_entry *entry,
2514 void *context, int vl, int mode, u64 data)
2515{
2516 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2517
2518 return dd->rcv_err_status_cnt[21];
2519}
2520
2521static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2522 const struct cntr_entry *entry,
2523 void *context, int vl, int mode, u64 data)
2524{
2525 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2526
2527 return dd->rcv_err_status_cnt[20];
2528}
2529
2530static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2531 const struct cntr_entry *entry,
2532 void *context, int vl, int mode, u64 data)
2533{
2534 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2535
2536 return dd->rcv_err_status_cnt[19];
2537}
2538
2539static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2540 void *context, int vl,
2541 int mode, u64 data)
2542{
2543 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2544
2545 return dd->rcv_err_status_cnt[18];
2546}
2547
2548static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2549 void *context, int vl,
2550 int mode, u64 data)
2551{
2552 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2553
2554 return dd->rcv_err_status_cnt[17];
2555}
2556
2557static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2558 const struct cntr_entry *entry,
2559 void *context, int vl, int mode, u64 data)
2560{
2561 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2562
2563 return dd->rcv_err_status_cnt[16];
2564}
2565
2566static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2567 const struct cntr_entry *entry,
2568 void *context, int vl, int mode, u64 data)
2569{
2570 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2571
2572 return dd->rcv_err_status_cnt[15];
2573}
2574
2575static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2576 void *context, int vl,
2577 int mode, u64 data)
2578{
2579 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2580
2581 return dd->rcv_err_status_cnt[14];
2582}
2583
2584static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2585 void *context, int vl,
2586 int mode, u64 data)
2587{
2588 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2589
2590 return dd->rcv_err_status_cnt[13];
2591}
2592
2593static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2594 void *context, int vl, int mode,
2595 u64 data)
2596{
2597 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2598
2599 return dd->rcv_err_status_cnt[12];
2600}
2601
2602static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2603 void *context, int vl, int mode,
2604 u64 data)
2605{
2606 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2607
2608 return dd->rcv_err_status_cnt[11];
2609}
2610
2611static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2612 void *context, int vl, int mode,
2613 u64 data)
2614{
2615 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2616
2617 return dd->rcv_err_status_cnt[10];
2618}
2619
2620static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2621 void *context, int vl, int mode,
2622 u64 data)
2623{
2624 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2625
2626 return dd->rcv_err_status_cnt[9];
2627}
2628
2629static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2630 void *context, int vl, int mode,
2631 u64 data)
2632{
2633 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2634
2635 return dd->rcv_err_status_cnt[8];
2636}
2637
2638static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2639 const struct cntr_entry *entry,
2640 void *context, int vl, int mode, u64 data)
2641{
2642 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2643
2644 return dd->rcv_err_status_cnt[7];
2645}
2646
2647static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2648 const struct cntr_entry *entry,
2649 void *context, int vl, int mode, u64 data)
2650{
2651 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2652
2653 return dd->rcv_err_status_cnt[6];
2654}
2655
2656static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2657 void *context, int vl, int mode,
2658 u64 data)
2659{
2660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2661
2662 return dd->rcv_err_status_cnt[5];
2663}
2664
2665static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2666 void *context, int vl, int mode,
2667 u64 data)
2668{
2669 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2670
2671 return dd->rcv_err_status_cnt[4];
2672}
2673
2674static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2675 void *context, int vl, int mode,
2676 u64 data)
2677{
2678 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2679
2680 return dd->rcv_err_status_cnt[3];
2681}
2682
2683static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2684 void *context, int vl, int mode,
2685 u64 data)
2686{
2687 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2688
2689 return dd->rcv_err_status_cnt[2];
2690}
2691
2692static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2693 void *context, int vl, int mode,
2694 u64 data)
2695{
2696 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2697
2698 return dd->rcv_err_status_cnt[1];
2699}
2700
2701static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2702 void *context, int vl, int mode,
2703 u64 data)
2704{
2705 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2706
2707 return dd->rcv_err_status_cnt[0];
2708}
2709
2710/*
2711 * Software counters corresponding to each of the
2712 * error status bits within SendPioErrStatus
2713 */
2714static u64 access_pio_pec_sop_head_parity_err_cnt(
2715 const struct cntr_entry *entry,
2716 void *context, int vl, int mode, u64 data)
2717{
2718 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2719
2720 return dd->send_pio_err_status_cnt[35];
2721}
2722
2723static u64 access_pio_pcc_sop_head_parity_err_cnt(
2724 const struct cntr_entry *entry,
2725 void *context, int vl, int mode, u64 data)
2726{
2727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2728
2729 return dd->send_pio_err_status_cnt[34];
2730}
2731
2732static u64 access_pio_last_returned_cnt_parity_err_cnt(
2733 const struct cntr_entry *entry,
2734 void *context, int vl, int mode, u64 data)
2735{
2736 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2737
2738 return dd->send_pio_err_status_cnt[33];
2739}
2740
2741static u64 access_pio_current_free_cnt_parity_err_cnt(
2742 const struct cntr_entry *entry,
2743 void *context, int vl, int mode, u64 data)
2744{
2745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2746
2747 return dd->send_pio_err_status_cnt[32];
2748}
2749
2750static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2751 void *context, int vl, int mode,
2752 u64 data)
2753{
2754 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2755
2756 return dd->send_pio_err_status_cnt[31];
2757}
2758
2759static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2760 void *context, int vl, int mode,
2761 u64 data)
2762{
2763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2764
2765 return dd->send_pio_err_status_cnt[30];
2766}
2767
2768static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2769 void *context, int vl, int mode,
2770 u64 data)
2771{
2772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2773
2774 return dd->send_pio_err_status_cnt[29];
2775}
2776
2777static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2778 const struct cntr_entry *entry,
2779 void *context, int vl, int mode, u64 data)
2780{
2781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2782
2783 return dd->send_pio_err_status_cnt[28];
2784}
2785
2786static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2787 void *context, int vl, int mode,
2788 u64 data)
2789{
2790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2791
2792 return dd->send_pio_err_status_cnt[27];
2793}
2794
2795static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2796 void *context, int vl, int mode,
2797 u64 data)
2798{
2799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2800
2801 return dd->send_pio_err_status_cnt[26];
2802}
2803
2804static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2805 void *context, int vl,
2806 int mode, u64 data)
2807{
2808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2809
2810 return dd->send_pio_err_status_cnt[25];
2811}
2812
2813static u64 access_pio_block_qw_count_parity_err_cnt(
2814 const struct cntr_entry *entry,
2815 void *context, int vl, int mode, u64 data)
2816{
2817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2818
2819 return dd->send_pio_err_status_cnt[24];
2820}
2821
2822static u64 access_pio_write_qw_valid_parity_err_cnt(
2823 const struct cntr_entry *entry,
2824 void *context, int vl, int mode, u64 data)
2825{
2826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2827
2828 return dd->send_pio_err_status_cnt[23];
2829}
2830
2831static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2832 void *context, int vl, int mode,
2833 u64 data)
2834{
2835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2836
2837 return dd->send_pio_err_status_cnt[22];
2838}
2839
2840static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2841 void *context, int vl,
2842 int mode, u64 data)
2843{
2844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2845
2846 return dd->send_pio_err_status_cnt[21];
2847}
2848
2849static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2850 void *context, int vl,
2851 int mode, u64 data)
2852{
2853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2854
2855 return dd->send_pio_err_status_cnt[20];
2856}
2857
2858static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2859 void *context, int vl,
2860 int mode, u64 data)
2861{
2862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2863
2864 return dd->send_pio_err_status_cnt[19];
2865}
2866
2867static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2868 const struct cntr_entry *entry,
2869 void *context, int vl, int mode, u64 data)
2870{
2871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2872
2873 return dd->send_pio_err_status_cnt[18];
2874}
2875
2876static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2877 void *context, int vl, int mode,
2878 u64 data)
2879{
2880 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2881
2882 return dd->send_pio_err_status_cnt[17];
2883}
2884
2885static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2886 void *context, int vl, int mode,
2887 u64 data)
2888{
2889 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2890
2891 return dd->send_pio_err_status_cnt[16];
2892}
2893
2894static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2895 const struct cntr_entry *entry,
2896 void *context, int vl, int mode, u64 data)
2897{
2898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2899
2900 return dd->send_pio_err_status_cnt[15];
2901}
2902
2903static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2904 const struct cntr_entry *entry,
2905 void *context, int vl, int mode, u64 data)
2906{
2907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2908
2909 return dd->send_pio_err_status_cnt[14];
2910}
2911
2912static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2913 const struct cntr_entry *entry,
2914 void *context, int vl, int mode, u64 data)
2915{
2916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2917
2918 return dd->send_pio_err_status_cnt[13];
2919}
2920
2921static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2922 const struct cntr_entry *entry,
2923 void *context, int vl, int mode, u64 data)
2924{
2925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2926
2927 return dd->send_pio_err_status_cnt[12];
2928}
2929
2930static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2931 const struct cntr_entry *entry,
2932 void *context, int vl, int mode, u64 data)
2933{
2934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2935
2936 return dd->send_pio_err_status_cnt[11];
2937}
2938
2939static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2940 const struct cntr_entry *entry,
2941 void *context, int vl, int mode, u64 data)
2942{
2943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2944
2945 return dd->send_pio_err_status_cnt[10];
2946}
2947
2948static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2949 const struct cntr_entry *entry,
2950 void *context, int vl, int mode, u64 data)
2951{
2952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2953
2954 return dd->send_pio_err_status_cnt[9];
2955}
2956
2957static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2958 const struct cntr_entry *entry,
2959 void *context, int vl, int mode, u64 data)
2960{
2961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2962
2963 return dd->send_pio_err_status_cnt[8];
2964}
2965
2966static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2967 const struct cntr_entry *entry,
2968 void *context, int vl, int mode, u64 data)
2969{
2970 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2971
2972 return dd->send_pio_err_status_cnt[7];
2973}
2974
2975static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2976 void *context, int vl, int mode,
2977 u64 data)
2978{
2979 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2980
2981 return dd->send_pio_err_status_cnt[6];
2982}
2983
2984static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2985 void *context, int vl, int mode,
2986 u64 data)
2987{
2988 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2989
2990 return dd->send_pio_err_status_cnt[5];
2991}
2992
2993static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2994 void *context, int vl, int mode,
2995 u64 data)
2996{
2997 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2998
2999 return dd->send_pio_err_status_cnt[4];
3000}
3001
3002static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3003 void *context, int vl, int mode,
3004 u64 data)
3005{
3006 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3007
3008 return dd->send_pio_err_status_cnt[3];
3009}
3010
3011static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3012 void *context, int vl, int mode,
3013 u64 data)
3014{
3015 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3016
3017 return dd->send_pio_err_status_cnt[2];
3018}
3019
3020static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3021 void *context, int vl,
3022 int mode, u64 data)
3023{
3024 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3025
3026 return dd->send_pio_err_status_cnt[1];
3027}
3028
3029static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3030 void *context, int vl, int mode,
3031 u64 data)
3032{
3033 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3034
3035 return dd->send_pio_err_status_cnt[0];
3036}
3037
3038/*
3039 * Software counters corresponding to each of the
3040 * error status bits within SendDmaErrStatus
3041 */
3042static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3043 const struct cntr_entry *entry,
3044 void *context, int vl, int mode, u64 data)
3045{
3046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3047
3048 return dd->send_dma_err_status_cnt[3];
3049}
3050
3051static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3052 const struct cntr_entry *entry,
3053 void *context, int vl, int mode, u64 data)
3054{
3055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3056
3057 return dd->send_dma_err_status_cnt[2];
3058}
3059
3060static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3061 void *context, int vl, int mode,
3062 u64 data)
3063{
3064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3065
3066 return dd->send_dma_err_status_cnt[1];
3067}
3068
3069static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3070 void *context, int vl, int mode,
3071 u64 data)
3072{
3073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3074
3075 return dd->send_dma_err_status_cnt[0];
3076}
3077
3078/*
3079 * Software counters corresponding to each of the
3080 * error status bits within SendEgressErrStatus
3081 */
3082static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3083 const struct cntr_entry *entry,
3084 void *context, int vl, int mode, u64 data)
3085{
3086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087
3088 return dd->send_egress_err_status_cnt[63];
3089}
3090
3091static u64 access_tx_read_sdma_memory_csr_err_cnt(
3092 const struct cntr_entry *entry,
3093 void *context, int vl, int mode, u64 data)
3094{
3095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096
3097 return dd->send_egress_err_status_cnt[62];
3098}
3099
3100static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3101 void *context, int vl, int mode,
3102 u64 data)
3103{
3104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105
3106 return dd->send_egress_err_status_cnt[61];
3107}
3108
3109static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3110 void *context, int vl,
3111 int mode, u64 data)
3112{
3113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3114
3115 return dd->send_egress_err_status_cnt[60];
3116}
3117
3118static u64 access_tx_read_sdma_memory_cor_err_cnt(
3119 const struct cntr_entry *entry,
3120 void *context, int vl, int mode, u64 data)
3121{
3122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3123
3124 return dd->send_egress_err_status_cnt[59];
3125}
3126
3127static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3128 void *context, int vl, int mode,
3129 u64 data)
3130{
3131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3132
3133 return dd->send_egress_err_status_cnt[58];
3134}
3135
3136static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3137 void *context, int vl, int mode,
3138 u64 data)
3139{
3140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3141
3142 return dd->send_egress_err_status_cnt[57];
3143}
3144
3145static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3146 void *context, int vl, int mode,
3147 u64 data)
3148{
3149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3150
3151 return dd->send_egress_err_status_cnt[56];
3152}
3153
3154static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3155 void *context, int vl, int mode,
3156 u64 data)
3157{
3158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3159
3160 return dd->send_egress_err_status_cnt[55];
3161}
3162
3163static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3164 void *context, int vl, int mode,
3165 u64 data)
3166{
3167 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3168
3169 return dd->send_egress_err_status_cnt[54];
3170}
3171
3172static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3173 void *context, int vl, int mode,
3174 u64 data)
3175{
3176 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3177
3178 return dd->send_egress_err_status_cnt[53];
3179}
3180
3181static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3182 void *context, int vl, int mode,
3183 u64 data)
3184{
3185 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3186
3187 return dd->send_egress_err_status_cnt[52];
3188}
3189
3190static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3191 void *context, int vl, int mode,
3192 u64 data)
3193{
3194 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3195
3196 return dd->send_egress_err_status_cnt[51];
3197}
3198
3199static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3200 void *context, int vl, int mode,
3201 u64 data)
3202{
3203 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3204
3205 return dd->send_egress_err_status_cnt[50];
3206}
3207
3208static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3209 void *context, int vl, int mode,
3210 u64 data)
3211{
3212 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3213
3214 return dd->send_egress_err_status_cnt[49];
3215}
3216
3217static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3218 void *context, int vl, int mode,
3219 u64 data)
3220{
3221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3222
3223 return dd->send_egress_err_status_cnt[48];
3224}
3225
3226static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3227 void *context, int vl, int mode,
3228 u64 data)
3229{
3230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3231
3232 return dd->send_egress_err_status_cnt[47];
3233}
3234
3235static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3236 void *context, int vl, int mode,
3237 u64 data)
3238{
3239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3240
3241 return dd->send_egress_err_status_cnt[46];
3242}
3243
3244static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3245 void *context, int vl, int mode,
3246 u64 data)
3247{
3248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3249
3250 return dd->send_egress_err_status_cnt[45];
3251}
3252
3253static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3254 void *context, int vl,
3255 int mode, u64 data)
3256{
3257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3258
3259 return dd->send_egress_err_status_cnt[44];
3260}
3261
3262static u64 access_tx_read_sdma_memory_unc_err_cnt(
3263 const struct cntr_entry *entry,
3264 void *context, int vl, int mode, u64 data)
3265{
3266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3267
3268 return dd->send_egress_err_status_cnt[43];
3269}
3270
3271static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3272 void *context, int vl, int mode,
3273 u64 data)
3274{
3275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3276
3277 return dd->send_egress_err_status_cnt[42];
3278}
3279
3280static u64 access_tx_credit_return_partiy_err_cnt(
3281 const struct cntr_entry *entry,
3282 void *context, int vl, int mode, u64 data)
3283{
3284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3285
3286 return dd->send_egress_err_status_cnt[41];
3287}
3288
3289static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3290 const struct cntr_entry *entry,
3291 void *context, int vl, int mode, u64 data)
3292{
3293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3294
3295 return dd->send_egress_err_status_cnt[40];
3296}
3297
3298static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3299 const struct cntr_entry *entry,
3300 void *context, int vl, int mode, u64 data)
3301{
3302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3303
3304 return dd->send_egress_err_status_cnt[39];
3305}
3306
3307static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3308 const struct cntr_entry *entry,
3309 void *context, int vl, int mode, u64 data)
3310{
3311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3312
3313 return dd->send_egress_err_status_cnt[38];
3314}
3315
3316static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3317 const struct cntr_entry *entry,
3318 void *context, int vl, int mode, u64 data)
3319{
3320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3321
3322 return dd->send_egress_err_status_cnt[37];
3323}
3324
3325static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3326 const struct cntr_entry *entry,
3327 void *context, int vl, int mode, u64 data)
3328{
3329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3330
3331 return dd->send_egress_err_status_cnt[36];
3332}
3333
3334static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3335 const struct cntr_entry *entry,
3336 void *context, int vl, int mode, u64 data)
3337{
3338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3339
3340 return dd->send_egress_err_status_cnt[35];
3341}
3342
3343static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3344 const struct cntr_entry *entry,
3345 void *context, int vl, int mode, u64 data)
3346{
3347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3348
3349 return dd->send_egress_err_status_cnt[34];
3350}
3351
3352static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3353 const struct cntr_entry *entry,
3354 void *context, int vl, int mode, u64 data)
3355{
3356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3357
3358 return dd->send_egress_err_status_cnt[33];
3359}
3360
3361static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3362 const struct cntr_entry *entry,
3363 void *context, int vl, int mode, u64 data)
3364{
3365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3366
3367 return dd->send_egress_err_status_cnt[32];
3368}
3369
3370static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3371 const struct cntr_entry *entry,
3372 void *context, int vl, int mode, u64 data)
3373{
3374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3375
3376 return dd->send_egress_err_status_cnt[31];
3377}
3378
3379static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3380 const struct cntr_entry *entry,
3381 void *context, int vl, int mode, u64 data)
3382{
3383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3384
3385 return dd->send_egress_err_status_cnt[30];
3386}
3387
3388static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3389 const struct cntr_entry *entry,
3390 void *context, int vl, int mode, u64 data)
3391{
3392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3393
3394 return dd->send_egress_err_status_cnt[29];
3395}
3396
3397static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3398 const struct cntr_entry *entry,
3399 void *context, int vl, int mode, u64 data)
3400{
3401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3402
3403 return dd->send_egress_err_status_cnt[28];
3404}
3405
3406static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3407 const struct cntr_entry *entry,
3408 void *context, int vl, int mode, u64 data)
3409{
3410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3411
3412 return dd->send_egress_err_status_cnt[27];
3413}
3414
3415static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3416 const struct cntr_entry *entry,
3417 void *context, int vl, int mode, u64 data)
3418{
3419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3420
3421 return dd->send_egress_err_status_cnt[26];
3422}
3423
3424static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3425 const struct cntr_entry *entry,
3426 void *context, int vl, int mode, u64 data)
3427{
3428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3429
3430 return dd->send_egress_err_status_cnt[25];
3431}
3432
3433static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3434 const struct cntr_entry *entry,
3435 void *context, int vl, int mode, u64 data)
3436{
3437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3438
3439 return dd->send_egress_err_status_cnt[24];
3440}
3441
3442static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3443 const struct cntr_entry *entry,
3444 void *context, int vl, int mode, u64 data)
3445{
3446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3447
3448 return dd->send_egress_err_status_cnt[23];
3449}
3450
3451static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3452 const struct cntr_entry *entry,
3453 void *context, int vl, int mode, u64 data)
3454{
3455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3456
3457 return dd->send_egress_err_status_cnt[22];
3458}
3459
3460static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3461 const struct cntr_entry *entry,
3462 void *context, int vl, int mode, u64 data)
3463{
3464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3465
3466 return dd->send_egress_err_status_cnt[21];
3467}
3468
3469static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3470 const struct cntr_entry *entry,
3471 void *context, int vl, int mode, u64 data)
3472{
3473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3474
3475 return dd->send_egress_err_status_cnt[20];
3476}
3477
3478static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3479 const struct cntr_entry *entry,
3480 void *context, int vl, int mode, u64 data)
3481{
3482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3483
3484 return dd->send_egress_err_status_cnt[19];
3485}
3486
3487static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3488 const struct cntr_entry *entry,
3489 void *context, int vl, int mode, u64 data)
3490{
3491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3492
3493 return dd->send_egress_err_status_cnt[18];
3494}
3495
3496static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3497 const struct cntr_entry *entry,
3498 void *context, int vl, int mode, u64 data)
3499{
3500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3501
3502 return dd->send_egress_err_status_cnt[17];
3503}
3504
3505static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3506 const struct cntr_entry *entry,
3507 void *context, int vl, int mode, u64 data)
3508{
3509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3510
3511 return dd->send_egress_err_status_cnt[16];
3512}
3513
3514static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3515 void *context, int vl, int mode,
3516 u64 data)
3517{
3518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3519
3520 return dd->send_egress_err_status_cnt[15];
3521}
3522
3523static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3524 void *context, int vl,
3525 int mode, u64 data)
3526{
3527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3528
3529 return dd->send_egress_err_status_cnt[14];
3530}
3531
3532static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3533 void *context, int vl, int mode,
3534 u64 data)
3535{
3536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3537
3538 return dd->send_egress_err_status_cnt[13];
3539}
3540
3541static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3542 void *context, int vl, int mode,
3543 u64 data)
3544{
3545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3546
3547 return dd->send_egress_err_status_cnt[12];
3548}
3549
3550static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3551 const struct cntr_entry *entry,
3552 void *context, int vl, int mode, u64 data)
3553{
3554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3555
3556 return dd->send_egress_err_status_cnt[11];
3557}
3558
3559static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3560 void *context, int vl, int mode,
3561 u64 data)
3562{
3563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3564
3565 return dd->send_egress_err_status_cnt[10];
3566}
3567
3568static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3569 void *context, int vl, int mode,
3570 u64 data)
3571{
3572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3573
3574 return dd->send_egress_err_status_cnt[9];
3575}
3576
3577static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3578 const struct cntr_entry *entry,
3579 void *context, int vl, int mode, u64 data)
3580{
3581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3582
3583 return dd->send_egress_err_status_cnt[8];
3584}
3585
3586static u64 access_tx_pio_launch_intf_parity_err_cnt(
3587 const struct cntr_entry *entry,
3588 void *context, int vl, int mode, u64 data)
3589{
3590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3591
3592 return dd->send_egress_err_status_cnt[7];
3593}
3594
3595static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3596 void *context, int vl, int mode,
3597 u64 data)
3598{
3599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3600
3601 return dd->send_egress_err_status_cnt[6];
3602}
3603
3604static u64 access_tx_incorrect_link_state_err_cnt(
3605 const struct cntr_entry *entry,
3606 void *context, int vl, int mode, u64 data)
3607{
3608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3609
3610 return dd->send_egress_err_status_cnt[5];
3611}
3612
3613static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3614 void *context, int vl, int mode,
3615 u64 data)
3616{
3617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3618
3619 return dd->send_egress_err_status_cnt[4];
3620}
3621
3622static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3623 const struct cntr_entry *entry,
3624 void *context, int vl, int mode, u64 data)
3625{
3626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3627
3628 return dd->send_egress_err_status_cnt[3];
3629}
3630
3631static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3632 void *context, int vl, int mode,
3633 u64 data)
3634{
3635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3636
3637 return dd->send_egress_err_status_cnt[2];
3638}
3639
3640static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3641 const struct cntr_entry *entry,
3642 void *context, int vl, int mode, u64 data)
3643{
3644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3645
3646 return dd->send_egress_err_status_cnt[1];
3647}
3648
3649static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3650 const struct cntr_entry *entry,
3651 void *context, int vl, int mode, u64 data)
3652{
3653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3654
3655 return dd->send_egress_err_status_cnt[0];
3656}
3657
3658/*
3659 * Software counters corresponding to each of the
3660 * error status bits within SendErrStatus
3661 */
3662static u64 access_send_csr_write_bad_addr_err_cnt(
3663 const struct cntr_entry *entry,
3664 void *context, int vl, int mode, u64 data)
3665{
3666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3667
3668 return dd->send_err_status_cnt[2];
3669}
3670
3671static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3672 void *context, int vl,
3673 int mode, u64 data)
3674{
3675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3676
3677 return dd->send_err_status_cnt[1];
3678}
3679
3680static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3681 void *context, int vl, int mode,
3682 u64 data)
3683{
3684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3685
3686 return dd->send_err_status_cnt[0];
3687}
3688
3689/*
3690 * Software counters corresponding to each of the
3691 * error status bits within SendCtxtErrStatus
3692 */
3693static u64 access_pio_write_out_of_bounds_err_cnt(
3694 const struct cntr_entry *entry,
3695 void *context, int vl, int mode, u64 data)
3696{
3697 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3698
3699 return dd->sw_ctxt_err_status_cnt[4];
3700}
3701
3702static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3703 void *context, int vl, int mode,
3704 u64 data)
3705{
3706 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3707
3708 return dd->sw_ctxt_err_status_cnt[3];
3709}
3710
3711static u64 access_pio_write_crosses_boundary_err_cnt(
3712 const struct cntr_entry *entry,
3713 void *context, int vl, int mode, u64 data)
3714{
3715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3716
3717 return dd->sw_ctxt_err_status_cnt[2];
3718}
3719
3720static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3721 void *context, int vl,
3722 int mode, u64 data)
3723{
3724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3725
3726 return dd->sw_ctxt_err_status_cnt[1];
3727}
3728
3729static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3730 void *context, int vl, int mode,
3731 u64 data)
3732{
3733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3734
3735 return dd->sw_ctxt_err_status_cnt[0];
3736}
3737
3738/*
3739 * Software counters corresponding to each of the
3740 * error status bits within SendDmaEngErrStatus
3741 */
3742static u64 access_sdma_header_request_fifo_cor_err_cnt(
3743 const struct cntr_entry *entry,
3744 void *context, int vl, int mode, u64 data)
3745{
3746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3747
3748 return dd->sw_send_dma_eng_err_status_cnt[23];
3749}
3750
3751static u64 access_sdma_header_storage_cor_err_cnt(
3752 const struct cntr_entry *entry,
3753 void *context, int vl, int mode, u64 data)
3754{
3755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3756
3757 return dd->sw_send_dma_eng_err_status_cnt[22];
3758}
3759
3760static u64 access_sdma_packet_tracking_cor_err_cnt(
3761 const struct cntr_entry *entry,
3762 void *context, int vl, int mode, u64 data)
3763{
3764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3765
3766 return dd->sw_send_dma_eng_err_status_cnt[21];
3767}
3768
3769static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3770 void *context, int vl, int mode,
3771 u64 data)
3772{
3773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3774
3775 return dd->sw_send_dma_eng_err_status_cnt[20];
3776}
3777
3778static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3779 void *context, int vl, int mode,
3780 u64 data)
3781{
3782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3783
3784 return dd->sw_send_dma_eng_err_status_cnt[19];
3785}
3786
3787static u64 access_sdma_header_request_fifo_unc_err_cnt(
3788 const struct cntr_entry *entry,
3789 void *context, int vl, int mode, u64 data)
3790{
3791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3792
3793 return dd->sw_send_dma_eng_err_status_cnt[18];
3794}
3795
3796static u64 access_sdma_header_storage_unc_err_cnt(
3797 const struct cntr_entry *entry,
3798 void *context, int vl, int mode, u64 data)
3799{
3800 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3801
3802 return dd->sw_send_dma_eng_err_status_cnt[17];
3803}
3804
3805static u64 access_sdma_packet_tracking_unc_err_cnt(
3806 const struct cntr_entry *entry,
3807 void *context, int vl, int mode, u64 data)
3808{
3809 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3810
3811 return dd->sw_send_dma_eng_err_status_cnt[16];
3812}
3813
3814static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3815 void *context, int vl, int mode,
3816 u64 data)
3817{
3818 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3819
3820 return dd->sw_send_dma_eng_err_status_cnt[15];
3821}
3822
3823static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3824 void *context, int vl, int mode,
3825 u64 data)
3826{
3827 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3828
3829 return dd->sw_send_dma_eng_err_status_cnt[14];
3830}
3831
3832static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3833 void *context, int vl, int mode,
3834 u64 data)
3835{
3836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3837
3838 return dd->sw_send_dma_eng_err_status_cnt[13];
3839}
3840
3841static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3842 void *context, int vl, int mode,
3843 u64 data)
3844{
3845 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3846
3847 return dd->sw_send_dma_eng_err_status_cnt[12];
3848}
3849
3850static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3851 void *context, int vl, int mode,
3852 u64 data)
3853{
3854 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3855
3856 return dd->sw_send_dma_eng_err_status_cnt[11];
3857}
3858
3859static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3860 void *context, int vl, int mode,
3861 u64 data)
3862{
3863 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3864
3865 return dd->sw_send_dma_eng_err_status_cnt[10];
3866}
3867
3868static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3869 void *context, int vl, int mode,
3870 u64 data)
3871{
3872 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3873
3874 return dd->sw_send_dma_eng_err_status_cnt[9];
3875}
3876
3877static u64 access_sdma_packet_desc_overflow_err_cnt(
3878 const struct cntr_entry *entry,
3879 void *context, int vl, int mode, u64 data)
3880{
3881 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3882
3883 return dd->sw_send_dma_eng_err_status_cnt[8];
3884}
3885
3886static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3887 void *context, int vl,
3888 int mode, u64 data)
3889{
3890 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3891
3892 return dd->sw_send_dma_eng_err_status_cnt[7];
3893}
3894
3895static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3896 void *context, int vl, int mode, u64 data)
3897{
3898 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3899
3900 return dd->sw_send_dma_eng_err_status_cnt[6];
3901}
3902
3903static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3904 void *context, int vl, int mode,
3905 u64 data)
3906{
3907 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3908
3909 return dd->sw_send_dma_eng_err_status_cnt[5];
3910}
3911
3912static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3913 void *context, int vl, int mode,
3914 u64 data)
3915{
3916 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3917
3918 return dd->sw_send_dma_eng_err_status_cnt[4];
3919}
3920
3921static u64 access_sdma_tail_out_of_bounds_err_cnt(
3922 const struct cntr_entry *entry,
3923 void *context, int vl, int mode, u64 data)
3924{
3925 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3926
3927 return dd->sw_send_dma_eng_err_status_cnt[3];
3928}
3929
3930static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3931 void *context, int vl, int mode,
3932 u64 data)
3933{
3934 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3935
3936 return dd->sw_send_dma_eng_err_status_cnt[2];
3937}
3938
3939static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3940 void *context, int vl, int mode,
3941 u64 data)
3942{
3943 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3944
3945 return dd->sw_send_dma_eng_err_status_cnt[1];
3946}
3947
3948static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3949 void *context, int vl, int mode,
3950 u64 data)
3951{
3952 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3953
3954 return dd->sw_send_dma_eng_err_status_cnt[0];
3955}
3956
3957static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3958 void *context, int vl, int mode,
3959 u64 data)
3960{
3961 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3962
3963 u64 val = 0;
3964 u64 csr = entry->csr;
3965
3966 val = read_write_csr(dd, csr, mode, data);
3967 if (mode == CNTR_MODE_R) {
3968 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
3969 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
3970 } else if (mode == CNTR_MODE_W) {
3971 dd->sw_rcv_bypass_packet_errors = 0;
3972 } else {
3973		dd_dev_err(dd, "Invalid cntr register access mode\n");
3974 return 0;
3975 }
3976 return val;
3977}
3978
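/*
 * The DC receive-error accessor above merges a software-maintained count
 * of dropped bypass packets into the hardware DCC_ERR_PORTRCV_ERR_CNT
 * value on reads, and zeroes that software count when the counter is
 * written (reset).  The clamping it performs is a saturating add so the
 * synthesized value never wraps past CNTR_MAX; a minimal equivalent is
 * sketched below.  The helper name is illustrative only and is not
 * referenced anywhere else in the driver.
 */
static inline u64 example_saturating_add(u64 cntr, u64 extra)
{
	/* cntr + extra, clamped at CNTR_MAX instead of wrapping */
	return (cntr > CNTR_MAX - extra) ? CNTR_MAX : cntr + extra;
}
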
3979#define def_access_sw_cpu(cntr) \
3980static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
3981 void *context, int vl, int mode, u64 data) \
3982{ \
3983 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3984	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
3985 ppd->ibport_data.rvp.cntr, vl, \
3986			      mode, data); \
3987}
3988
3989def_access_sw_cpu(rc_acks);
3990def_access_sw_cpu(rc_qacks);
3991def_access_sw_cpu(rc_delayed_comp);
3992
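/*
 * For reference, each def_access_sw_cpu() use above expands to a per-port
 * accessor that routes reads, writes and per-VL lookups of a per-CPU
 * counter through read_write_cpu().  The expansion for rc_acks, written
 * out by hand purely for illustration, is roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */
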
3993#define def_access_ibp_counter(cntr) \
3994static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
3995 void *context, int vl, int mode, u64 data) \
3996{ \
3997 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
3998 \
3999 if (vl != CNTR_INVALID_VL) \
4000 return 0; \
4001 \
4002	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4003			     mode, data); \
4004}
4005
4006def_access_ibp_counter(loop_pkts);
4007def_access_ibp_counter(rc_resends);
4008def_access_ibp_counter(rnr_naks);
4009def_access_ibp_counter(other_naks);
4010def_access_ibp_counter(rc_timeouts);
4011def_access_ibp_counter(pkt_drops);
4012def_access_ibp_counter(dmawait);
4013def_access_ibp_counter(rc_seqnak);
4014def_access_ibp_counter(rc_dupreq);
4015def_access_ibp_counter(rdma_seq);
4016def_access_ibp_counter(unaligned);
4017def_access_ibp_counter(seq_naks);
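/*
 * The def_access_ibp_counter() uses above differ from the per-CPU variant
 * in two ways: these IB-protocol counters are not kept per virtual lane,
 * so a per-VL query (vl != CNTR_INVALID_VL) simply reports 0, and the
 * value is a plain software counter handled through read_write_sw().
 * Written out by hand for loop_pkts, the expansion is roughly the
 * following (illustration only):
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */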
4018
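/*
 * dev_cntrs[] maps every device counter enum to a name, an optional CSR,
 * a set of flags (CNTR_NORMAL, CNTR_SYNTH, CNTR_VL, CNTR_32BIT, ...) that
 * tell the counter framework how to read and widen the value, and the
 * access callback to invoke.  As a sketch of the shape only -- the enum
 * value and accessor below are hypothetical, not part of the driver -- a
 * purely software device counter would be registered as:
 *
 *	[C_MY_SW_CNT] = CNTR_ELEM("MySwCnt", 0, 0, CNTR_NORMAL,
 *				  access_my_sw_cnt),
 */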
4019static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4020[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4021[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4022 CNTR_NORMAL),
4023[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4024 CNTR_NORMAL),
4025[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4026 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4027 CNTR_NORMAL),
4028[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4029 CNTR_NORMAL),
4030[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4031 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4032[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4033 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4034[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4035 CNTR_NORMAL),
4036[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4037 CNTR_NORMAL),
4038[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4039 CNTR_NORMAL),
4040[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4041 CNTR_NORMAL),
4042[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4043 CNTR_NORMAL),
4044[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4045 CNTR_NORMAL),
4046[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4047 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4048[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4049 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4050[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4051 CNTR_SYNTH),
4052[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4053 access_dc_rcv_err_cnt),
4054[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4055 CNTR_SYNTH),
4056[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4057 CNTR_SYNTH),
4058[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4059 CNTR_SYNTH),
4060[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4061 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4062[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4063 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4064 CNTR_SYNTH),
4065[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4066 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4067[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4068 CNTR_SYNTH),
4069[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4070 CNTR_SYNTH),
4071[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4072 CNTR_SYNTH),
4073[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4074 CNTR_SYNTH),
4075[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4076 CNTR_SYNTH),
4077[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4078 CNTR_SYNTH),
4079[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4080 CNTR_SYNTH),
4081[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4082 CNTR_SYNTH | CNTR_VL),
4083[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4084 CNTR_SYNTH | CNTR_VL),
4085[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4086[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4087 CNTR_SYNTH | CNTR_VL),
4088[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4089[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4090 CNTR_SYNTH | CNTR_VL),
4091[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4092 CNTR_SYNTH),
4093[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4094 CNTR_SYNTH | CNTR_VL),
4095[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4096 CNTR_SYNTH),
4097[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4098 CNTR_SYNTH | CNTR_VL),
4099[C_DC_TOTAL_CRC] =
4100 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4101 CNTR_SYNTH),
4102[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4103 CNTR_SYNTH),
4104[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4105 CNTR_SYNTH),
4106[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4107 CNTR_SYNTH),
4108[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4109 CNTR_SYNTH),
4110[C_DC_CRC_MULT_LN] =
4111 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4112 CNTR_SYNTH),
4113[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4114 CNTR_SYNTH),
4115[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4116 CNTR_SYNTH),
4117[C_DC_SEQ_CRC_CNT] =
4118 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4119 CNTR_SYNTH),
4120[C_DC_ESC0_ONLY_CNT] =
4121 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4122 CNTR_SYNTH),
4123[C_DC_ESC0_PLUS1_CNT] =
4124 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4125 CNTR_SYNTH),
4126[C_DC_ESC0_PLUS2_CNT] =
4127 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4128 CNTR_SYNTH),
4129[C_DC_REINIT_FROM_PEER_CNT] =
4130 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4131 CNTR_SYNTH),
4132[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4133 CNTR_SYNTH),
4134[C_DC_MISC_FLG_CNT] =
4135 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4136 CNTR_SYNTH),
4137[C_DC_PRF_GOOD_LTP_CNT] =
4138 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4139[C_DC_PRF_ACCEPTED_LTP_CNT] =
4140 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4141 CNTR_SYNTH),
4142[C_DC_PRF_RX_FLIT_CNT] =
4143 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4144[C_DC_PRF_TX_FLIT_CNT] =
4145 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4146[C_DC_PRF_CLK_CNTR] =
4147 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4148[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4149 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4150[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4151 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4152 CNTR_SYNTH),
4153[C_DC_PG_STS_TX_SBE_CNT] =
4154 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4155[C_DC_PG_STS_TX_MBE_CNT] =
4156 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4157 CNTR_SYNTH),
4158[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4159 access_sw_cpu_intr),
4160[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4161 access_sw_cpu_rcv_limit),
4162[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4163 access_sw_vtx_wait),
4164[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4165 access_sw_pio_wait),
4166[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4167 access_sw_pio_drain),
4168[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4169 access_sw_kmem_wait),
4170[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4171 access_sw_send_schedule),
4172[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4173 SEND_DMA_DESC_FETCHED_CNT, 0,
4174 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4175 dev_access_u32_csr),
4176[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4177 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4178 access_sde_int_cnt),
4179[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4180 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4181 access_sde_err_cnt),
4182[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4183 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4184 access_sde_idle_int_cnt),
4185[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4186 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4187 access_sde_progress_int_cnt),
4188/* MISC_ERR_STATUS */
4189[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4190 CNTR_NORMAL,
4191 access_misc_pll_lock_fail_err_cnt),
4192[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4193 CNTR_NORMAL,
4194 access_misc_mbist_fail_err_cnt),
4195[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4196 CNTR_NORMAL,
4197 access_misc_invalid_eep_cmd_err_cnt),
4198[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4199 CNTR_NORMAL,
4200 access_misc_efuse_done_parity_err_cnt),
4201[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4202 CNTR_NORMAL,
4203 access_misc_efuse_write_err_cnt),
4204[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4205 0, CNTR_NORMAL,
4206 access_misc_efuse_read_bad_addr_err_cnt),
4207[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4208 CNTR_NORMAL,
4209 access_misc_efuse_csr_parity_err_cnt),
4210[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4211 CNTR_NORMAL,
4212 access_misc_fw_auth_failed_err_cnt),
4213[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4214 CNTR_NORMAL,
4215 access_misc_key_mismatch_err_cnt),
4216[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4217 CNTR_NORMAL,
4218 access_misc_sbus_write_failed_err_cnt),
4219[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4220 CNTR_NORMAL,
4221 access_misc_csr_write_bad_addr_err_cnt),
4222[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4223 CNTR_NORMAL,
4224 access_misc_csr_read_bad_addr_err_cnt),
4225[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4226 CNTR_NORMAL,
4227 access_misc_csr_parity_err_cnt),
4228/* CceErrStatus */
4229[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4230 CNTR_NORMAL,
4231 access_sw_cce_err_status_aggregated_cnt),
4232[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4233 CNTR_NORMAL,
4234 access_cce_msix_csr_parity_err_cnt),
4235[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4236 CNTR_NORMAL,
4237 access_cce_int_map_unc_err_cnt),
4238[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4239 CNTR_NORMAL,
4240 access_cce_int_map_cor_err_cnt),
4241[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4242 CNTR_NORMAL,
4243 access_cce_msix_table_unc_err_cnt),
4244[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4245 CNTR_NORMAL,
4246 access_cce_msix_table_cor_err_cnt),
4247[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4248 0, CNTR_NORMAL,
4249 access_cce_rxdma_conv_fifo_parity_err_cnt),
4250[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4251 0, CNTR_NORMAL,
4252 access_cce_rcpl_async_fifo_parity_err_cnt),
4253[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4254 CNTR_NORMAL,
4255 access_cce_seg_write_bad_addr_err_cnt),
4256[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4257 CNTR_NORMAL,
4258 access_cce_seg_read_bad_addr_err_cnt),
4259[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4260 CNTR_NORMAL,
4261 access_la_triggered_cnt),
4262[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4263 CNTR_NORMAL,
4264 access_cce_trgt_cpl_timeout_err_cnt),
4265[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4266 CNTR_NORMAL,
4267 access_pcic_receive_parity_err_cnt),
4268[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4269 CNTR_NORMAL,
4270 access_pcic_transmit_back_parity_err_cnt),
4271[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4272 0, CNTR_NORMAL,
4273 access_pcic_transmit_front_parity_err_cnt),
4274[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4275 CNTR_NORMAL,
4276 access_pcic_cpl_dat_q_unc_err_cnt),
4277[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4278 CNTR_NORMAL,
4279 access_pcic_cpl_hd_q_unc_err_cnt),
4280[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4281 CNTR_NORMAL,
4282 access_pcic_post_dat_q_unc_err_cnt),
4283[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4284 CNTR_NORMAL,
4285 access_pcic_post_hd_q_unc_err_cnt),
4286[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4287 CNTR_NORMAL,
4288 access_pcic_retry_sot_mem_unc_err_cnt),
4289[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4290 CNTR_NORMAL,
4291 access_pcic_retry_mem_unc_err),
4292[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4293 CNTR_NORMAL,
4294 access_pcic_n_post_dat_q_parity_err_cnt),
4295[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_pcic_n_post_h_q_parity_err_cnt),
4298[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4299 CNTR_NORMAL,
4300 access_pcic_cpl_dat_q_cor_err_cnt),
4301[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_pcic_cpl_hd_q_cor_err_cnt),
4304[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4305 CNTR_NORMAL,
4306 access_pcic_post_dat_q_cor_err_cnt),
4307[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_pcic_post_hd_q_cor_err_cnt),
4310[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4311 CNTR_NORMAL,
4312 access_pcic_retry_sot_mem_cor_err_cnt),
4313[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_pcic_retry_mem_cor_err_cnt),
4316[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4317 "CceCli1AsyncFifoDbgParityError", 0, 0,
4318 CNTR_NORMAL,
4319 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4320[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4321 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4322 CNTR_NORMAL,
4323 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4324 ),
4325[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4326 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4327 CNTR_NORMAL,
4328 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4329[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4330 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4331 CNTR_NORMAL,
4332 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4333[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4334 0, CNTR_NORMAL,
4335 access_cce_cli2_async_fifo_parity_err_cnt),
4336[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4337 CNTR_NORMAL,
4338 access_cce_csr_cfg_bus_parity_err_cnt),
4339[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4340 0, CNTR_NORMAL,
4341 access_cce_cli0_async_fifo_parity_err_cnt),
4342[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4343 CNTR_NORMAL,
4344 access_cce_rspd_data_parity_err_cnt),
4345[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4346 CNTR_NORMAL,
4347 access_cce_trgt_access_err_cnt),
4348[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4349 0, CNTR_NORMAL,
4350 access_cce_trgt_async_fifo_parity_err_cnt),
4351[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4352 CNTR_NORMAL,
4353 access_cce_csr_write_bad_addr_err_cnt),
4354[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4355 CNTR_NORMAL,
4356 access_cce_csr_read_bad_addr_err_cnt),
4357[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4358 CNTR_NORMAL,
4359 access_ccs_csr_parity_err_cnt),
4360
4361/* RcvErrStatus */
4362[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4363 CNTR_NORMAL,
4364 access_rx_csr_parity_err_cnt),
4365[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4366 CNTR_NORMAL,
4367 access_rx_csr_write_bad_addr_err_cnt),
4368[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4369 CNTR_NORMAL,
4370 access_rx_csr_read_bad_addr_err_cnt),
4371[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4372 CNTR_NORMAL,
4373 access_rx_dma_csr_unc_err_cnt),
4374[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4375 CNTR_NORMAL,
4376 access_rx_dma_dq_fsm_encoding_err_cnt),
4377[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4378 CNTR_NORMAL,
4379 access_rx_dma_eq_fsm_encoding_err_cnt),
4380[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4381 CNTR_NORMAL,
4382 access_rx_dma_csr_parity_err_cnt),
4383[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4384 CNTR_NORMAL,
4385 access_rx_rbuf_data_cor_err_cnt),
4386[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4387 CNTR_NORMAL,
4388 access_rx_rbuf_data_unc_err_cnt),
4389[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4390 CNTR_NORMAL,
4391 access_rx_dma_data_fifo_rd_cor_err_cnt),
4392[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4393 CNTR_NORMAL,
4394 access_rx_dma_data_fifo_rd_unc_err_cnt),
4395[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4396 CNTR_NORMAL,
4397 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4398[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4399 CNTR_NORMAL,
4400 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4401[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4402 CNTR_NORMAL,
4403 access_rx_rbuf_desc_part2_cor_err_cnt),
4404[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4405 CNTR_NORMAL,
4406 access_rx_rbuf_desc_part2_unc_err_cnt),
4407[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rx_rbuf_desc_part1_cor_err_cnt),
4410[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_rx_rbuf_desc_part1_unc_err_cnt),
4413[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rx_hq_intr_fsm_err_cnt),
4416[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4417 CNTR_NORMAL,
4418 access_rx_hq_intr_csr_parity_err_cnt),
4419[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4420 CNTR_NORMAL,
4421 access_rx_lookup_csr_parity_err_cnt),
4422[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_lookup_rcv_array_cor_err_cnt),
4425[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_lookup_rcv_array_unc_err_cnt),
4428[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4429 0, CNTR_NORMAL,
4430 access_rx_lookup_des_part2_parity_err_cnt),
4431[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4432 0, CNTR_NORMAL,
4433 access_rx_lookup_des_part1_unc_cor_err_cnt),
4434[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rx_lookup_des_part1_unc_err_cnt),
4437[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rx_rbuf_next_free_buf_cor_err_cnt),
4440[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4441 CNTR_NORMAL,
4442 access_rx_rbuf_next_free_buf_unc_err_cnt),
4443[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4444 "RxRbufFlInitWrAddrParityErr", 0, 0,
4445 CNTR_NORMAL,
4446 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4447[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4448 0, CNTR_NORMAL,
4449 access_rx_rbuf_fl_initdone_parity_err_cnt),
4450[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4451 0, CNTR_NORMAL,
4452 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4453[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4454 CNTR_NORMAL,
4455 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4456[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4457 CNTR_NORMAL,
4458 access_rx_rbuf_empty_err_cnt),
4459[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4460 CNTR_NORMAL,
4461 access_rx_rbuf_full_err_cnt),
4462[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4463 CNTR_NORMAL,
4464 access_rbuf_bad_lookup_err_cnt),
4465[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4466 CNTR_NORMAL,
4467 access_rbuf_ctx_id_parity_err_cnt),
4468[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4469 CNTR_NORMAL,
4470 access_rbuf_csr_qeopdw_parity_err_cnt),
4471[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4472 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4473 CNTR_NORMAL,
4474 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4475[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4476 "RxRbufCsrQTlPtrParityErr", 0, 0,
4477 CNTR_NORMAL,
4478 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4479[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4480 0, CNTR_NORMAL,
4481 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4482[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4483 0, CNTR_NORMAL,
4484 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4485[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4486 0, 0, CNTR_NORMAL,
4487 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4488[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4489 0, CNTR_NORMAL,
4490 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4491[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4492 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4495[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4496 0, CNTR_NORMAL,
4497 access_rx_rbuf_block_list_read_cor_err_cnt),
4498[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4499 0, CNTR_NORMAL,
4500 access_rx_rbuf_block_list_read_unc_err_cnt),
4501[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rx_rbuf_lookup_des_cor_err_cnt),
4504[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4505 CNTR_NORMAL,
4506 access_rx_rbuf_lookup_des_unc_err_cnt),
4507[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4508 "RxRbufLookupDesRegUncCorErr", 0, 0,
4509 CNTR_NORMAL,
4510 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4511[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4512 CNTR_NORMAL,
4513 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4514[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4515 CNTR_NORMAL,
4516 access_rx_rbuf_free_list_cor_err_cnt),
4517[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4518 CNTR_NORMAL,
4519 access_rx_rbuf_free_list_unc_err_cnt),
4520[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4521 CNTR_NORMAL,
4522 access_rx_rcv_fsm_encoding_err_cnt),
4523[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4524 CNTR_NORMAL,
4525 access_rx_dma_flag_cor_err_cnt),
4526[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4527 CNTR_NORMAL,
4528 access_rx_dma_flag_unc_err_cnt),
4529[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4530 CNTR_NORMAL,
4531 access_rx_dc_sop_eop_parity_err_cnt),
4532[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4533 CNTR_NORMAL,
4534 access_rx_rcv_csr_parity_err_cnt),
4535[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4536 CNTR_NORMAL,
4537 access_rx_rcv_qp_map_table_cor_err_cnt),
4538[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4539 CNTR_NORMAL,
4540 access_rx_rcv_qp_map_table_unc_err_cnt),
4541[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4542 CNTR_NORMAL,
4543 access_rx_rcv_data_cor_err_cnt),
4544[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4545 CNTR_NORMAL,
4546 access_rx_rcv_data_unc_err_cnt),
4547[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_rx_rcv_hdr_cor_err_cnt),
4550[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_rx_rcv_hdr_unc_err_cnt),
4553[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_rx_dc_intf_parity_err_cnt),
4556[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_rx_dma_csr_cor_err_cnt),
4559/* SendPioErrStatus */
4560[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4561 CNTR_NORMAL,
4562 access_pio_pec_sop_head_parity_err_cnt),
4563[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4564 CNTR_NORMAL,
4565 access_pio_pcc_sop_head_parity_err_cnt),
4566[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4567 0, 0, CNTR_NORMAL,
4568 access_pio_last_returned_cnt_parity_err_cnt),
4569[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4570 0, CNTR_NORMAL,
4571 access_pio_current_free_cnt_parity_err_cnt),
4572[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4573 CNTR_NORMAL,
4574 access_pio_reserved_31_err_cnt),
4575[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4576 CNTR_NORMAL,
4577 access_pio_reserved_30_err_cnt),
4578[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4579 CNTR_NORMAL,
4580 access_pio_ppmc_sop_len_err_cnt),
4581[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4582 CNTR_NORMAL,
4583 access_pio_ppmc_bqc_mem_parity_err_cnt),
4584[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4585 CNTR_NORMAL,
4586 access_pio_vl_fifo_parity_err_cnt),
4587[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4588 CNTR_NORMAL,
4589 access_pio_vlf_sop_parity_err_cnt),
4590[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4591 CNTR_NORMAL,
4592 access_pio_vlf_v1_len_parity_err_cnt),
4593[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_pio_block_qw_count_parity_err_cnt),
4596[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_pio_write_qw_valid_parity_err_cnt),
4599[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4600 CNTR_NORMAL,
4601 access_pio_state_machine_err_cnt),
4602[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4603 CNTR_NORMAL,
4604 access_pio_write_data_parity_err_cnt),
4605[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_host_addr_mem_cor_err_cnt),
4608[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_host_addr_mem_unc_err_cnt),
4611[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4614[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_init_sm_in_err_cnt),
4617[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_ppmc_pbl_fifo_err_cnt),
4620[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4621 0, CNTR_NORMAL,
4622 access_pio_credit_ret_fifo_parity_err_cnt),
4623[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_v1_len_mem_bank1_cor_err_cnt),
4626[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_v1_len_mem_bank0_cor_err_cnt),
4629[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_v1_len_mem_bank1_unc_err_cnt),
4632[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_v1_len_mem_bank0_unc_err_cnt),
4635[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_sm_pkt_reset_parity_err_cnt),
4638[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_pio_pkt_evict_fifo_parity_err_cnt),
4641[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4642 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4643 CNTR_NORMAL,
4644 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4645[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4646 CNTR_NORMAL,
4647 access_pio_sbrdctl_crrel_parity_err_cnt),
4648[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4649 CNTR_NORMAL,
4650 access_pio_pec_fifo_parity_err_cnt),
4651[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4652 CNTR_NORMAL,
4653 access_pio_pcc_fifo_parity_err_cnt),
4654[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4655 CNTR_NORMAL,
4656 access_pio_sb_mem_fifo1_err_cnt),
4657[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4658 CNTR_NORMAL,
4659 access_pio_sb_mem_fifo0_err_cnt),
4660[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4661 CNTR_NORMAL,
4662 access_pio_csr_parity_err_cnt),
4663[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4664 CNTR_NORMAL,
4665 access_pio_write_addr_parity_err_cnt),
4666[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4667 CNTR_NORMAL,
4668 access_pio_write_bad_ctxt_err_cnt),
4669/* SendDmaErrStatus */
4670[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4671 0, CNTR_NORMAL,
4672 access_sdma_pcie_req_tracking_cor_err_cnt),
4673[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4674 0, CNTR_NORMAL,
4675 access_sdma_pcie_req_tracking_unc_err_cnt),
4676[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4677 CNTR_NORMAL,
4678 access_sdma_csr_parity_err_cnt),
4679[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4680 CNTR_NORMAL,
4681 access_sdma_rpy_tag_err_cnt),
4682/* SendEgressErrStatus */
4683[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4684 CNTR_NORMAL,
4685 access_tx_read_pio_memory_csr_unc_err_cnt),
4686[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4687 0, CNTR_NORMAL,
4688 access_tx_read_sdma_memory_csr_err_cnt),
4689[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4690 CNTR_NORMAL,
4691 access_tx_egress_fifo_cor_err_cnt),
4692[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4693 CNTR_NORMAL,
4694 access_tx_read_pio_memory_cor_err_cnt),
4695[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4696 CNTR_NORMAL,
4697 access_tx_read_sdma_memory_cor_err_cnt),
4698[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4699 CNTR_NORMAL,
4700 access_tx_sb_hdr_cor_err_cnt),
4701[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4702 CNTR_NORMAL,
4703 access_tx_credit_overrun_err_cnt),
4704[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4705 CNTR_NORMAL,
4706 access_tx_launch_fifo8_cor_err_cnt),
4707[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4708 CNTR_NORMAL,
4709 access_tx_launch_fifo7_cor_err_cnt),
4710[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4711 CNTR_NORMAL,
4712 access_tx_launch_fifo6_cor_err_cnt),
4713[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4714 CNTR_NORMAL,
4715 access_tx_launch_fifo5_cor_err_cnt),
4716[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_tx_launch_fifo4_cor_err_cnt),
4719[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4720 CNTR_NORMAL,
4721 access_tx_launch_fifo3_cor_err_cnt),
4722[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4723 CNTR_NORMAL,
4724 access_tx_launch_fifo2_cor_err_cnt),
4725[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4726 CNTR_NORMAL,
4727 access_tx_launch_fifo1_cor_err_cnt),
4728[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4729 CNTR_NORMAL,
4730 access_tx_launch_fifo0_cor_err_cnt),
4731[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4732 CNTR_NORMAL,
4733 access_tx_credit_return_vl_err_cnt),
4734[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4735 CNTR_NORMAL,
4736 access_tx_hcrc_insertion_err_cnt),
4737[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4738 CNTR_NORMAL,
4739 access_tx_egress_fifo_unc_err_cnt),
4740[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4741 CNTR_NORMAL,
4742 access_tx_read_pio_memory_unc_err_cnt),
4743[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4744 CNTR_NORMAL,
4745 access_tx_read_sdma_memory_unc_err_cnt),
4746[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4747 CNTR_NORMAL,
4748 access_tx_sb_hdr_unc_err_cnt),
4749[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4750 CNTR_NORMAL,
4751 access_tx_credit_return_partiy_err_cnt),
4752[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4753 0, 0, CNTR_NORMAL,
4754 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4755[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4756 0, 0, CNTR_NORMAL,
4757 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4758[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4759 0, 0, CNTR_NORMAL,
4760 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4761[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4762 0, 0, CNTR_NORMAL,
4763 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4764[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4765 0, 0, CNTR_NORMAL,
4766 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4767[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4768 0, 0, CNTR_NORMAL,
4769 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4770[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4771 0, 0, CNTR_NORMAL,
4772 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4773[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4774 0, 0, CNTR_NORMAL,
4775 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4776[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4777 0, 0, CNTR_NORMAL,
4778 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4779[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4780 0, 0, CNTR_NORMAL,
4781 access_tx_sdma15_disallowed_packet_err_cnt),
4782[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4783 0, 0, CNTR_NORMAL,
4784 access_tx_sdma14_disallowed_packet_err_cnt),
4785[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4786 0, 0, CNTR_NORMAL,
4787 access_tx_sdma13_disallowed_packet_err_cnt),
4788[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4789 0, 0, CNTR_NORMAL,
4790 access_tx_sdma12_disallowed_packet_err_cnt),
4791[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4792 0, 0, CNTR_NORMAL,
4793 access_tx_sdma11_disallowed_packet_err_cnt),
4794[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4795 0, 0, CNTR_NORMAL,
4796 access_tx_sdma10_disallowed_packet_err_cnt),
4797[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4798 0, 0, CNTR_NORMAL,
4799 access_tx_sdma9_disallowed_packet_err_cnt),
4800[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4801 0, 0, CNTR_NORMAL,
4802 access_tx_sdma8_disallowed_packet_err_cnt),
4803[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4804 0, 0, CNTR_NORMAL,
4805 access_tx_sdma7_disallowed_packet_err_cnt),
4806[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4807 0, 0, CNTR_NORMAL,
4808 access_tx_sdma6_disallowed_packet_err_cnt),
4809[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4810 0, 0, CNTR_NORMAL,
4811 access_tx_sdma5_disallowed_packet_err_cnt),
4812[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4813 0, 0, CNTR_NORMAL,
4814 access_tx_sdma4_disallowed_packet_err_cnt),
4815[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4816 0, 0, CNTR_NORMAL,
4817 access_tx_sdma3_disallowed_packet_err_cnt),
4818[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma2_disallowed_packet_err_cnt),
4821[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4822 0, 0, CNTR_NORMAL,
4823 access_tx_sdma1_disallowed_packet_err_cnt),
4824[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4825 0, 0, CNTR_NORMAL,
4826 access_tx_sdma0_disallowed_packet_err_cnt),
4827[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4828 CNTR_NORMAL,
4829 access_tx_config_parity_err_cnt),
4830[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4831 CNTR_NORMAL,
4832 access_tx_sbrd_ctl_csr_parity_err_cnt),
4833[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4834 CNTR_NORMAL,
4835 access_tx_launch_csr_parity_err_cnt),
4836[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4837 CNTR_NORMAL,
4838 access_tx_illegal_vl_err_cnt),
4839[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4840 "TxSbrdCtlStateMachineParityErr", 0, 0,
4841 CNTR_NORMAL,
4842 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4843[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4844 CNTR_NORMAL,
4845 access_egress_reserved_10_err_cnt),
4846[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4847 CNTR_NORMAL,
4848 access_egress_reserved_9_err_cnt),
4849[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4850 0, 0, CNTR_NORMAL,
4851 access_tx_sdma_launch_intf_parity_err_cnt),
4852[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4853 CNTR_NORMAL,
4854 access_tx_pio_launch_intf_parity_err_cnt),
4855[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4856 CNTR_NORMAL,
4857 access_egress_reserved_6_err_cnt),
4858[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4859 CNTR_NORMAL,
4860 access_tx_incorrect_link_state_err_cnt),
4861[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4862 CNTR_NORMAL,
4863 access_tx_linkdown_err_cnt),
4864[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4865 "EgressFifoUnderrunOrParityErr", 0, 0,
4866 CNTR_NORMAL,
4867 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4868[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4869 CNTR_NORMAL,
4870 access_egress_reserved_2_err_cnt),
4871[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4872 CNTR_NORMAL,
4873 access_tx_pkt_integrity_mem_unc_err_cnt),
4874[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4875 CNTR_NORMAL,
4876 access_tx_pkt_integrity_mem_cor_err_cnt),
4877/* SendErrStatus */
4878[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4879 CNTR_NORMAL,
4880 access_send_csr_write_bad_addr_err_cnt),
4881[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4882 CNTR_NORMAL,
4883 access_send_csr_read_bad_addr_err_cnt),
4884[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4885 CNTR_NORMAL,
4886 access_send_csr_parity_cnt),
4887/* SendCtxtErrStatus */
4888[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4889 CNTR_NORMAL,
4890 access_pio_write_out_of_bounds_err_cnt),
4891[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_pio_write_overflow_err_cnt),
4894[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4895 0, 0, CNTR_NORMAL,
4896 access_pio_write_crosses_boundary_err_cnt),
4897[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4898 CNTR_NORMAL,
4899 access_pio_disallowed_packet_err_cnt),
4900[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4901 CNTR_NORMAL,
4902 access_pio_inconsistent_sop_err_cnt),
4903/* SendDmaEngErrStatus */
4904[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4905 0, 0, CNTR_NORMAL,
4906 access_sdma_header_request_fifo_cor_err_cnt),
4907[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_sdma_header_storage_cor_err_cnt),
4910[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4911 CNTR_NORMAL,
4912 access_sdma_packet_tracking_cor_err_cnt),
4913[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4914 CNTR_NORMAL,
4915 access_sdma_assembly_cor_err_cnt),
4916[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4917 CNTR_NORMAL,
4918 access_sdma_desc_table_cor_err_cnt),
4919[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4920 0, 0, CNTR_NORMAL,
4921 access_sdma_header_request_fifo_unc_err_cnt),
4922[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4923 CNTR_NORMAL,
4924 access_sdma_header_storage_unc_err_cnt),
4925[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4926 CNTR_NORMAL,
4927 access_sdma_packet_tracking_unc_err_cnt),
4928[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4929 CNTR_NORMAL,
4930 access_sdma_assembly_unc_err_cnt),
4931[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4932 CNTR_NORMAL,
4933 access_sdma_desc_table_unc_err_cnt),
4934[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4935 CNTR_NORMAL,
4936 access_sdma_timeout_err_cnt),
4937[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4938 CNTR_NORMAL,
4939 access_sdma_header_length_err_cnt),
4940[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_header_address_err_cnt),
4943[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_header_select_err_cnt),
4946[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4947 CNTR_NORMAL,
4948 access_sdma_reserved_9_err_cnt),
4949[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4950 CNTR_NORMAL,
4951 access_sdma_packet_desc_overflow_err_cnt),
4952[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4953 CNTR_NORMAL,
4954 access_sdma_length_mismatch_err_cnt),
4955[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4956 CNTR_NORMAL,
4957 access_sdma_halt_err_cnt),
4958[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_sdma_mem_read_err_cnt),
4961[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_sdma_first_desc_err_cnt),
4964[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_sdma_tail_out_of_bounds_err_cnt),
4967[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4968 CNTR_NORMAL,
4969 access_sdma_too_long_err_cnt),
4970[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4971 CNTR_NORMAL,
4972 access_sdma_gen_mismatch_err_cnt),
4973[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4974 CNTR_NORMAL,
4975 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004976};
4977
4978static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4979[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4980 CNTR_NORMAL),
4981[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4982 CNTR_NORMAL),
4983[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4984 CNTR_NORMAL),
4985[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4986 CNTR_NORMAL),
4987[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4988 CNTR_NORMAL),
4989[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4990 CNTR_NORMAL),
4991[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4992 CNTR_NORMAL),
4993[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4994[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4995[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4996[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004997 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04004998[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08004999 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005000[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005001 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005002[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5003[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5004[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005005 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005006[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005007 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05005008[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5009 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005010[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005011 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005012[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08005013 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5014 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005015[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005016 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005017[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005018 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005019[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5020[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5021[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5022[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5023[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5024[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5025[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5026[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5027[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5028[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5029[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5030[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5031[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5032 access_sw_cpu_rc_acks),
5033[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005034 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005035[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005036 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005037[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5038[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5039[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5040[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5041[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5042[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5043[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5044[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5045[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5046[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5047[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5048[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5049[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5050[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5051[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5052[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5053[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5054[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5055[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5056[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5057[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5058[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5059[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5060[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5061[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5062[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5063[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5064[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5065[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5066[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5067[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5068[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5069[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5070[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5071[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5072[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5073[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5074[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5075[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5076[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5077[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5078[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5079[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5080[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5081[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5082[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5083[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5084[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5085[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5086[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5087[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5088[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5089[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5090[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5091[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5092[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5093[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5094[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5095[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5096[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5097[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5098[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5099[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5100[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5101[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5102[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5103[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5104[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5105[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5106[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5107[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5108[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5109[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5110[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5111[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5112[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5113[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5114[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5115[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5116[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5117};
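
/*
 * Editorial note on the counter tables above (a hedged summary of the
 * assumed conventions, not taken from the original comments): entries
 * built with a zero CSR and CNTR_NORMAL are software counters read
 * through their access_* handler; CNTR_SYNTH is assumed to mark
 * counters the driver synthesizes into full 64-bit values; CNTR_32BIT
 * marks hardware counters that are only 32 bits wide; and CNTR_VL marks
 * entries replicated once per virtual lane.
 */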
5118
5119/* ======================================================================== */
5120
Mike Marciniszyn77241052015-07-30 15:17:43 -04005121/* return true if this is chip revision a */
5122int is_ax(struct hfi1_devdata *dd)
5123{
5124 u8 chip_rev_minor =
5125 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5126 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5127 return (chip_rev_minor & 0xf0) == 0;
5128}
5129
5130/* return true if this is chip revision b */
5131int is_bx(struct hfi1_devdata *dd)
5132{
5133 u8 chip_rev_minor =
5134 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5135 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005136 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005137}
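
/*
 * Worked example for the two helpers above (illustrative, not from the
 * original source): only the high nibble of the ChipRevMinor field is
 * tested, so every A-step (0x0x) and every B-step (0x1x) minor revision
 * matches.  Assuming a minor revision value of 0x12:
 *
 *	(0x12 & 0xf0) == 0x00	is false, so is_ax() returns 0
 *	(0x12 & 0xf0) == 0x10	is true,  so is_bx() returns 1
 */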
5138
5139/*
5140 * Append string s to buffer buf. Arguments curp and lenp are the current
5141 * position and remaining length, respectively.
5142 *
5143 * return 0 on success, 1 on out of room
5144 */
5145static int append_str(char *buf, char **curp, int *lenp, const char *s)
5146{
5147 char *p = *curp;
5148 int len = *lenp;
5149 int result = 0; /* success */
5150 char c;
5151
5152	/* add a comma, unless this is the first entry in the buffer */
5153 if (p != buf) {
5154 if (len == 0) {
5155 result = 1; /* out of room */
5156 goto done;
5157 }
5158 *p++ = ',';
5159 len--;
5160 }
5161
5162 /* copy the string */
5163 while ((c = *s++) != 0) {
5164 if (len == 0) {
5165 result = 1; /* out of room */
5166 goto done;
5167 }
5168 *p++ = c;
5169 len--;
5170 }
5171
5172done:
5173 /* write return values */
5174 *curp = p;
5175 *lenp = len;
5176
5177 return result;
5178}
5179
5180/*
5181 * Using the given flag table, print a comma separated string into
5182 * the buffer. End in '*' if the buffer is too short.
5183 */
5184static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005185 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005186{
5187 char extra[32];
5188 char *p = buf;
5189 int len = buf_len;
5190 int no_room = 0;
5191 int i;
5192
5193	/* make sure there are at least 2 bytes so we can form "*" */
5194 if (len < 2)
5195 return "";
5196
5197 len--; /* leave room for a nul */
5198 for (i = 0; i < table_size; i++) {
5199 if (flags & table[i].flag) {
5200 no_room = append_str(buf, &p, &len, table[i].str);
5201 if (no_room)
5202 break;
5203 flags &= ~table[i].flag;
5204 }
5205 }
5206
5207 /* any undocumented bits left? */
5208 if (!no_room && flags) {
5209 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5210 no_room = append_str(buf, &p, &len, extra);
5211 }
5212
5213 /* add * if ran out of room */
5214 if (no_room) {
5215 /* may need to back up to add space for a '*' */
5216 if (len == 0)
5217 --p;
5218 *p++ = '*';
5219 }
5220
5221 /* add final nul - space already allocated above */
5222 *p = 0;
5223 return buf;
5224}
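
/*
 * Usage sketch for flag_string() (illustrative only; the table below is
 * hypothetical, not one of the driver's real flag tables):
 *
 *	static struct flag_table example_flags[] = {
 *		{ .flag = 0x1ull, .str = "ErrA" },
 *		{ .flag = 0x4ull, .str = "ErrB" },
 *	};
 *	char buf[96];
 *
 *	flag_string(buf, sizeof(buf), 0x105, example_flags,
 *		    ARRAY_SIZE(example_flags));
 *
 * would be expected to leave "ErrA,ErrB,bits 0x100" in buf, with a
 * trailing '*' instead if the buffer were too small to hold everything.
 */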
5225
5226/* first 8 CCE error interrupt source names */
5227static const char * const cce_misc_names[] = {
5228 "CceErrInt", /* 0 */
5229 "RxeErrInt", /* 1 */
5230 "MiscErrInt", /* 2 */
5231 "Reserved3", /* 3 */
5232 "PioErrInt", /* 4 */
5233 "SDmaErrInt", /* 5 */
5234 "EgressErrInt", /* 6 */
5235 "TxeErrInt" /* 7 */
5236};
5237
5238/*
5239 * Return the miscellaneous error interrupt name.
5240 */
5241static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5242{
5243 if (source < ARRAY_SIZE(cce_misc_names))
5244 strncpy(buf, cce_misc_names[source], bsize);
5245 else
Jubin John17fb4f22016-02-14 20:21:52 -08005246 snprintf(buf, bsize, "Reserved%u",
5247 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005248
5249 return buf;
5250}
5251
5252/*
5253 * Return the SDMA engine error interrupt name.
5254 */
5255static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5256{
5257 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5258 return buf;
5259}
5260
5261/*
5262 * Return the send context error interrupt name.
5263 */
5264static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5265{
5266 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5267 return buf;
5268}
5269
5270static const char * const various_names[] = {
5271 "PbcInt",
5272 "GpioAssertInt",
5273 "Qsfp1Int",
5274 "Qsfp2Int",
5275 "TCritInt"
5276};
5277
5278/*
5279 * Return the various interrupt name.
5280 */
5281static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5282{
5283 if (source < ARRAY_SIZE(various_names))
5284 strncpy(buf, various_names[source], bsize);
5285 else
Jubin John8638b772016-02-14 20:19:24 -08005286 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005287 return buf;
5288}
5289
5290/*
5291 * Return the DC interrupt name.
5292 */
5293static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5294{
5295 static const char * const dc_int_names[] = {
5296 "common",
5297 "lcb",
5298 "8051",
5299 "lbm" /* local block merge */
5300 };
5301
5302 if (source < ARRAY_SIZE(dc_int_names))
5303 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5304 else
5305 snprintf(buf, bsize, "DCInt%u", source);
5306 return buf;
5307}
5308
5309static const char * const sdma_int_names[] = {
5310 "SDmaInt",
5311 "SdmaIdleInt",
5312 "SdmaProgressInt",
5313};
5314
5315/*
5316 * Return the SDMA engine interrupt name.
5317 */
5318static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5319{
5320 /* what interrupt */
5321 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5322 /* which engine */
5323 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5324
5325 if (likely(what < 3))
5326 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5327 else
5328 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5329 return buf;
5330}
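
/*
 * Decode sketch (illustrative, assuming TXE_NUM_SDMA_ENGINES is 16):
 * SDMA interrupt sources are laid out as what * 16 + which, so source 2
 * names as "SDmaInt2", while source 17 decodes to what = 1, which = 1
 * and names as "SdmaIdleInt1".
 */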
5331
5332/*
5333 * Return the receive available interrupt name.
5334 */
5335static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5336{
5337 snprintf(buf, bsize, "RcvAvailInt%u", source);
5338 return buf;
5339}
5340
5341/*
5342 * Return the receive urgent interrupt name.
5343 */
5344static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5345{
5346 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5347 return buf;
5348}
5349
5350/*
5351 * Return the send credit interrupt name.
5352 */
5353static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5354{
5355 snprintf(buf, bsize, "SendCreditInt%u", source);
5356 return buf;
5357}
5358
5359/*
5360 * Return the reserved interrupt name.
5361 */
5362static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5363{
5364 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5365 return buf;
5366}
5367
5368static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5369{
5370 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005371 cce_err_status_flags,
5372 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005373}
5374
5375static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5376{
5377 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005378 rxe_err_status_flags,
5379 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005380}
5381
5382static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5383{
5384 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005385 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005386}
5387
5388static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5389{
5390 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005391 pio_err_status_flags,
5392 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005393}
5394
5395static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5396{
5397 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005398 sdma_err_status_flags,
5399 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005400}
5401
5402static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5403{
5404 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005405 egress_err_status_flags,
5406 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005407}
5408
5409static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5410{
5411 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005412 egress_err_info_flags,
5413 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005414}
5415
5416static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5417{
5418 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005419 send_err_status_flags,
5420 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005421}
5422
5423static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5424{
5425 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005426 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005427
5428 /*
5429	 * For most of these errors, there is nothing that can be done except
5430 * report or record it.
5431 */
5432 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005433 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005434
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005435 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5436 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005437 /* this error requires a manual drop into SPC freeze mode */
5438 /* then a fix up */
5439 start_freeze_handling(dd->pport, FREEZE_SELF);
5440 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005441
5442 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5443 if (reg & (1ull << i)) {
5444 incr_cntr64(&dd->cce_err_status_cnt[i]);
5445 /* maintain a counter over all cce_err_status errors */
5446 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5447 }
5448 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005449}
5450
5451/*
5452 * Check counters for receive errors that do not have an interrupt
5453 * associated with them.
5454 */
5455#define RCVERR_CHECK_TIME 10
5456static void update_rcverr_timer(unsigned long opaque)
5457{
5458 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5459 struct hfi1_pportdata *ppd = dd->pport;
5460 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5461
5462 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005463 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005464 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005465 set_link_down_reason(
5466 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5467 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005468 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5469 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005470 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005471
5472 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5473}
5474
5475static int init_rcverr(struct hfi1_devdata *dd)
5476{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305477 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005478 /* Assume the hardware counter has been reset */
5479 dd->rcv_ovfl_cnt = 0;
5480 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5481}
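
/*
 * Timing note (editorial): with RCVERR_CHECK_TIME of 10 the timer is
 * re-armed for jiffies + HZ * 10, so roughly every ten seconds
 * update_rcverr_timer() compares C_RCV_OVF against the previous
 * snapshot and, if the port is configured to act on excessive buffer
 * overruns, bounces the link.
 */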
5482
5483static void free_rcverr(struct hfi1_devdata *dd)
5484{
5485 if (dd->rcverr_timer.data)
5486 del_timer_sync(&dd->rcverr_timer);
5487 dd->rcverr_timer.data = 0;
5488}
5489
5490static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5491{
5492 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005493 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005494
5495 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005496 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005497
5498 if (reg & ALL_RXE_FREEZE_ERR) {
5499 int flags = 0;
5500
5501 /*
5502 * Freeze mode recovery is disabled for the errors
5503 * in RXE_FREEZE_ABORT_MASK
5504 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005505 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005506 flags = FREEZE_ABORT;
5507
5508 start_freeze_handling(dd->pport, flags);
5509 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005510
5511 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5512 if (reg & (1ull << i))
5513 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5514 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005515}
5516
5517static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518{
5519 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005520 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005521
5522 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005523 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005524 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5525 if (reg & (1ull << i))
5526 incr_cntr64(&dd->misc_err_status_cnt[i]);
5527 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005528}
5529
5530static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5531{
5532 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005533 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005534
5535 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005536 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005537
5538 if (reg & ALL_PIO_FREEZE_ERR)
5539 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005540
5541 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5542 if (reg & (1ull << i))
5543 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5544 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005545}
5546
5547static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5548{
5549 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005550 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005551
5552 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005553 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005554
5555 if (reg & ALL_SDMA_FREEZE_ERR)
5556 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005557
5558 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5559 if (reg & (1ull << i))
5560 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5561 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005562}
5563
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005564static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5565{
5566 incr_cntr64(&ppd->port_xmit_discards);
5567}
5568
Mike Marciniszyn77241052015-07-30 15:17:43 -04005569static void count_port_inactive(struct hfi1_devdata *dd)
5570{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005571 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005572}
5573
5574/*
5575 * We have had a "disallowed packet" error during egress. Determine the
5576 * integrity check that failed, and update the relevant error counters.
5577 *
5578 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5579 * bit of state per integrity check, and so we can miss the reason for an
5580 * egress error if more than one packet fails the same integrity check
5581 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5582 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005583static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5584 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005585{
5586 struct hfi1_pportdata *ppd = dd->pport;
5587 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5588 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5589 char buf[96];
5590
5591 /* clear down all observed info as quickly as possible after read */
5592 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5593
5594 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005595 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5596 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005597
5598 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005599 if (info & PORT_DISCARD_EGRESS_ERRS) {
5600 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005601
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005602 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005603 * Count all applicable bits as individual errors and
5604 * attribute them to the packet that triggered this handler.
5605 * This may not be completely accurate due to limitations
5606 * on the available hardware error information. There is
5607 * a single information register and any number of error
5608 * packets may have occurred and contributed to it before
5609 * this routine is called. This means that:
5610 * a) If multiple packets with the same error occur before
5611 * this routine is called, earlier packets are missed.
5612 * There is only a single bit for each error type.
5613 * b) Errors may not be attributed to the correct VL.
5614 * The driver is attributing all bits in the info register
5615 * to the packet that triggered this call, but bits
5616 * could be an accumulation of different packets with
5617 * different VLs.
5618 * c) A single error packet may have multiple counts attached
5619 * to it. There is no way for the driver to know if
5620 * multiple bits set in the info register are due to a
5621 * single packet or multiple packets. The driver assumes
5622 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005623 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005624 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005625 for (i = 0; i < weight; i++) {
5626 __count_port_discards(ppd);
5627 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5628 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5629 else if (vl == 15)
5630 incr_cntr64(&ppd->port_xmit_discards_vl
5631 [C_VL_15]);
5632 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005633 }
5634}
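
/*
 * Worked example of the counting policy above (illustrative only): if
 * SEND_EGRESS_ERR_INFO reads back with two discard-class bits set,
 * hweight64() yields 2 and the loop charges two discards to the port,
 * plus two to the per-VL counter when the triggering VL is known
 * (0..TXE_NUM_DATA_VL-1, or VL15).  As the block comment cautions, the
 * two bits may in fact have come from different packets on different
 * VLs; the driver simply cannot tell.
 */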
5635
5636/*
5637 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5638 * register. Does it represent a 'port inactive' error?
5639 */
5640static inline int port_inactive_err(u64 posn)
5641{
5642 return (posn >= SEES(TX_LINKDOWN) &&
5643 posn <= SEES(TX_INCORRECT_LINK_STATE));
5644}
5645
5646/*
5647 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5648 * register. Does it represent a 'disallowed packet' error?
5649 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005650static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005651{
5652 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5653 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5654}
5655
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005656/*
5657 * Input value is a bit position of one of the SDMA engine disallowed
5658 * packet errors. Return which engine. Use of this must be guarded by
5659 * disallowed_pkt_err().
5660 */
5661static inline int disallowed_pkt_engine(int posn)
5662{
5663 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5664}
5665
5666/*
5667 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5668 * be done.
5669 */
5670static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5671{
5672 struct sdma_vl_map *m;
5673 int vl;
5674
5675 /* range check */
5676 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5677 return -1;
5678
5679 rcu_read_lock();
5680 m = rcu_dereference(dd->sdma_map);
5681 vl = m->engine_to_vl[engine];
5682 rcu_read_unlock();
5683
5684 return vl;
5685}
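
/*
 * Note (editorial): the engine-to-VL map is read under rcu_read_lock()
 * because dd->sdma_map can be replaced while errors are being serviced;
 * the lookup therefore sees either the old or the new mapping, never a
 * partially torn or freed one.
 */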
5686
5687/*
5688 * Translate the send context (software index) into a VL. Return -1 if the
5689 * translation cannot be done.
5690 */
5691static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5692{
5693 struct send_context_info *sci;
5694 struct send_context *sc;
5695 int i;
5696
5697 sci = &dd->send_contexts[sw_index];
5698
5699 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005700 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005701 return -1;
5702
5703 sc = sci->sc;
5704 if (!sc)
5705 return -1;
5706 if (dd->vld[15].sc == sc)
5707 return 15;
5708 for (i = 0; i < num_vls; i++)
5709 if (dd->vld[i].sc == sc)
5710 return i;
5711
5712 return -1;
5713}
5714
Mike Marciniszyn77241052015-07-30 15:17:43 -04005715static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5716{
5717 u64 reg_copy = reg, handled = 0;
5718 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005719 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005720
5721 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5722 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005723 else if (is_ax(dd) &&
5724 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5725 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005726 start_freeze_handling(dd->pport, 0);
5727
5728 while (reg_copy) {
5729 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005730 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005731 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005732 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005733
5734 if (port_inactive_err(shift)) {
5735 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005736 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005737 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005738 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5739
5740 handle_send_egress_err_info(dd, vl);
5741 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005742 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005743 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005744 }
5745
5746 reg &= ~handled;
5747
5748 if (reg)
5749 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005750 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005751
5752 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5753 if (reg & (1ull << i))
5754 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5755 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005756}
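
/*
 * Bit-walk sketch for the loop above (illustrative): fls64() returns a
 * 1-based bit number, hence the posn - 1 adjustment.  For
 * reg_copy = 0x9 the loop first sees posn = 4 (shift 3), classifies and
 * clears bit 3, then sees posn = 1 (shift 0) and clears bit 0; anything
 * neither port-inactive nor disallowed-packet is left in reg and
 * reported as a generic egress error.
 */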
5757
5758static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5759{
5760 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005761 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005762
5763 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005764 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005765
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005766 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5767 if (reg & (1ull << i))
5768 incr_cntr64(&dd->send_err_status_cnt[i]);
5769 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005770}
5771
5772/*
5773 * The maximum number of times the error clear down will loop before
5774 * blocking a repeating error. This value is arbitrary.
5775 */
5776#define MAX_CLEAR_COUNT 20
5777
5778/*
5779 * Clear and handle an error register. All error interrupts are funneled
5780 * through here to have a central location to correctly handle single-
5781 * or multi-shot errors.
5782 *
5783 * For non per-context registers, call this routine with a context value
5784 * of 0 so the per-context offset is zero.
5785 *
5786 * If the handler loops too many times, assume that something is wrong
5787 * and can't be fixed, so mask the error bits.
5788 */
5789static void interrupt_clear_down(struct hfi1_devdata *dd,
5790 u32 context,
5791 const struct err_reg_info *eri)
5792{
5793 u64 reg;
5794 u32 count;
5795
5796 /* read in a loop until no more errors are seen */
5797 count = 0;
5798 while (1) {
5799 reg = read_kctxt_csr(dd, context, eri->status);
5800 if (reg == 0)
5801 break;
5802 write_kctxt_csr(dd, context, eri->clear, reg);
5803 if (likely(eri->handler))
5804 eri->handler(dd, context, reg);
5805 count++;
5806 if (count > MAX_CLEAR_COUNT) {
5807 u64 mask;
5808
5809 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005810 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005811 /*
5812 * Read-modify-write so any other masked bits
5813 * remain masked.
5814 */
5815 mask = read_kctxt_csr(dd, context, eri->mask);
5816 mask &= ~reg;
5817 write_kctxt_csr(dd, context, eri->mask, mask);
5818 break;
5819 }
5820 }
5821}
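
/*
 * Masking sketch (illustrative): when a source keeps reasserting past
 * MAX_CLEAR_COUNT it is silenced with a read-modify-write of the mask
 * CSR, clearing only the repeating bits.  For example, with a current
 * mask of 0xffff and a stuck reg of 0x0003:
 *
 *	mask = read_kctxt_csr(dd, context, eri->mask);	yields 0xffff
 *	mask &= ~reg;					now 0xfffc
 *	write_kctxt_csr(dd, context, eri->mask, mask);
 *
 * so bits that were already masked for other reasons stay masked.
 */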
5822
5823/*
5824 * CCE block "misc" interrupt. Source is < 16.
5825 */
5826static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5827{
5828 const struct err_reg_info *eri = &misc_errs[source];
5829
5830 if (eri->handler) {
5831 interrupt_clear_down(dd, 0, eri);
5832 } else {
5833 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005834 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005835 }
5836}
5837
5838static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5839{
5840 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005841 sc_err_status_flags,
5842 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005843}
5844
5845/*
5846 * Send context error interrupt. Source (hw_context) is < 160.
5847 *
5848 * All send context errors cause the send context to halt. The normal
5849 * clear-down mechanism cannot be used because we cannot clear the
5850 * error bits until several other long-running items are done first.
5851 * This is OK because with the context halted, nothing else is going
5852 * to happen on it anyway.
5853 */
5854static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5855 unsigned int hw_context)
5856{
5857 struct send_context_info *sci;
5858 struct send_context *sc;
5859 char flags[96];
5860 u64 status;
5861 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005862 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005863
5864 sw_index = dd->hw_to_sw[hw_context];
5865 if (sw_index >= dd->num_send_contexts) {
5866 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005867 "out of range sw index %u for send context %u\n",
5868 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005869 return;
5870 }
5871 sci = &dd->send_contexts[sw_index];
5872 sc = sci->sc;
5873 if (!sc) {
5874 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005875 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005876 return;
5877 }
5878
5879 /* tell the software that a halt has begun */
5880 sc_stop(sc, SCF_HALTED);
5881
5882 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5883
5884 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005885 send_context_err_status_string(flags, sizeof(flags),
5886 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005887
5888 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005889 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005890
5891 /*
5892 * Automatically restart halted kernel contexts out of interrupt
5893 * context. User contexts must ask the driver to restart the context.
5894 */
5895 if (sc->type != SC_USER)
5896 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005897
5898 /*
5899 * Update the counters for the corresponding status bits.
5900 * Note that these particular counters are aggregated over all
5901 * 160 contexts.
5902 */
5903 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5904 if (status & (1ull << i))
5905 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5906 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005907}
5908
5909static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5910 unsigned int source, u64 status)
5911{
5912 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005913 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005914
5915 sde = &dd->per_sdma[source];
5916#ifdef CONFIG_SDMA_VERBOSITY
5917 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5918 slashstrip(__FILE__), __LINE__, __func__);
5919 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5920 sde->this_idx, source, (unsigned long long)status);
5921#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005922 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005923 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005924
5925 /*
5926 * Update the counters for the corresponding status bits.
5927 * Note that these particular counters are aggregated over
5928 * all 16 DMA engines.
5929 */
5930 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5931 if (status & (1ull << i))
5932 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5933 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005934}
5935
5936/*
5937 * CCE block SDMA error interrupt. Source is < 16.
5938 */
5939static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5940{
5941#ifdef CONFIG_SDMA_VERBOSITY
5942 struct sdma_engine *sde = &dd->per_sdma[source];
5943
5944 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5945 slashstrip(__FILE__), __LINE__, __func__);
5946 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5947 source);
5948 sdma_dumpstate(sde);
5949#endif
5950 interrupt_clear_down(dd, source, &sdma_eng_err);
5951}
5952
5953/*
5954 * CCE block "various" interrupt. Source is < 8.
5955 */
5956static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5957{
5958 const struct err_reg_info *eri = &various_err[source];
5959
5960 /*
5961 * TCritInt cannot go through interrupt_clear_down()
5962 * because it is not a second tier interrupt. The handler
5963 * should be called directly.
5964 */
5965 if (source == TCRIT_INT_SOURCE)
5966 handle_temp_err(dd);
5967 else if (eri->handler)
5968 interrupt_clear_down(dd, 0, eri);
5969 else
5970 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005971 "%s: Unimplemented/reserved interrupt %d\n",
5972 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005973}
5974
5975static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5976{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005977 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005978 struct hfi1_pportdata *ppd = dd->pport;
5979 unsigned long flags;
5980 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5981
5982 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005983 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08005984 dd_dev_info(dd, "%s: QSFP module removed\n",
5985 __func__);
5986
Mike Marciniszyn77241052015-07-30 15:17:43 -04005987 ppd->driver_link_ready = 0;
5988 /*
5989 * Cable removed, reset all our information about the
5990 * cache and cable capabilities
5991 */
5992
5993 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5994 /*
5995 * We don't set cache_refresh_required here as we expect
5996 * an interrupt when a cable is inserted
5997 */
5998 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08005999 ppd->qsfp_info.reset_needed = 0;
6000 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006001 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006002 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006003 /* Invert the ModPresent pin now to detect plug-in */
6004 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6005 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006006
6007 if ((ppd->offline_disabled_reason >
6008 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006009 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006010 (ppd->offline_disabled_reason ==
6011 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6012 ppd->offline_disabled_reason =
6013 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006014 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006015
Mike Marciniszyn77241052015-07-30 15:17:43 -04006016 if (ppd->host_link_state == HLS_DN_POLL) {
6017 /*
6018 * The link is still in POLL. This means
6019 * that the normal link down processing
6020 * will not happen. We have to do it here
6021 * before turning the DC off.
6022 */
6023 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6024 }
6025 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006026 dd_dev_info(dd, "%s: QSFP module inserted\n",
6027 __func__);
6028
Mike Marciniszyn77241052015-07-30 15:17:43 -04006029 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6030 ppd->qsfp_info.cache_valid = 0;
6031 ppd->qsfp_info.cache_refresh_required = 1;
6032 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006033 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006034
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006035 /*
6036 * Stop inversion of ModPresent pin to detect
6037 * removal of the cable
6038 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006039 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006040 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6041 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6042
6043 ppd->offline_disabled_reason =
6044 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006045 }
6046 }
6047
6048 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006049 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006050 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006051 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6052 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006053 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6054 }
6055
6056 /* Schedule the QSFP work only if there is a cable attached. */
6057 if (qsfp_mod_present(ppd))
6058 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6059}
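
/*
 * Summary of the ModPresent handling above: while no module is present
 * the MODPRST_N sense is inverted (via ASIC_QSFP1/2_INVERT) so the next
 * interrupt fires on plug-in; once a module is detected the inversion is
 * removed so the next interrupt fires on removal. The QSFP work item is
 * only queued while a module is actually present.
 */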
6060
6061static int request_host_lcb_access(struct hfi1_devdata *dd)
6062{
6063 int ret;
6064
6065 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006066 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6067 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006068 if (ret != HCMD_SUCCESS) {
6069 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006070 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006071 }
6072 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6073}
6074
6075static int request_8051_lcb_access(struct hfi1_devdata *dd)
6076{
6077 int ret;
6078
6079 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006080 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6081 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006082 if (ret != HCMD_SUCCESS) {
6083 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006084 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006085 }
6086 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6087}
6088
6089/*
6090 * Set the LCB selector - allow host access. The DCC selector always
6091 * points to the host.
6092 */
6093static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6094{
6095 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006096 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6097 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006098}
6099
6100/*
6101 * Clear the LCB selector - allow 8051 access. The DCC selector always
6102 * points to the host.
6103 */
6104static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6105{
6106 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006107 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006108}
6109
6110/*
6111 * Acquire LCB access from the 8051. If the host already has access,
6112 * just increment a counter. Otherwise, inform the 8051 that the
6113 * host is taking access.
6114 *
6115 * Returns:
6116 * 0 on success
6117 * -EBUSY if the 8051 has control and cannot be disturbed
6118 * -errno if unable to acquire access from the 8051
6119 */
6120int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6121{
6122 struct hfi1_pportdata *ppd = dd->pport;
6123 int ret = 0;
6124
6125 /*
6126 * Use the host link state lock so the operation of this routine
6127 * { link state check, selector change, count increment } can occur
6128 * as a unit against a link state change. Otherwise there is a
6129 * race between the state change and the count increment.
6130 */
6131 if (sleep_ok) {
6132 mutex_lock(&ppd->hls_lock);
6133 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006134 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006135 udelay(1);
6136 }
6137
6138 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006139 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006140 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006141 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006142 ret = -EBUSY;
6143 goto done;
6144 }
6145
6146 if (dd->lcb_access_count == 0) {
6147 ret = request_host_lcb_access(dd);
6148 if (ret) {
6149 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006150 "%s: unable to acquire LCB access, err %d\n",
6151 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006152 goto done;
6153 }
6154 set_host_lcb_access(dd);
6155 }
6156 dd->lcb_access_count++;
6157done:
6158 mutex_unlock(&ppd->hls_lock);
6159 return ret;
6160}
6161
6162/*
6163 * Release LCB access by decrementing the use count. If the count is moving
6164 * from 1 to 0, inform 8051 that it has control back.
6165 *
6166 * Returns:
6167 * 0 on success
6168 * -errno if unable to release access to the 8051
6169 */
6170int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6171{
6172 int ret = 0;
6173
6174 /*
6175 * Use the host link state lock because the acquire needed it.
6176 * Here, we only need to keep { selector change, count decrement }
6177 * as a unit.
6178 */
6179 if (sleep_ok) {
6180 mutex_lock(&dd->pport->hls_lock);
6181 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006182 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006183 udelay(1);
6184 }
6185
6186 if (dd->lcb_access_count == 0) {
6187 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006188 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006189 goto done;
6190 }
6191
6192 if (dd->lcb_access_count == 1) {
6193 set_8051_lcb_access(dd);
6194 ret = request_8051_lcb_access(dd);
6195 if (ret) {
6196 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006197 "%s: unable to release LCB access, err %d\n",
6198 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006199 /* restore host access if the grant didn't work */
6200 set_host_lcb_access(dd);
6201 goto done;
6202 }
6203 }
6204 dd->lcb_access_count--;
6205done:
6206 mutex_unlock(&dd->pport->hls_lock);
6207 return ret;
6208}
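
/*
 * Illustrative caller pattern for the two routines above (not taken from
 * a specific call site): code that needs direct LCB CSR access would
 * bracket it with an acquire/release pair, e.g.
 *
 *	if (!acquire_lcb_access(dd, 1)) {
 *		... read or write DC_LCB_* CSRs ...
 *		release_lcb_access(dd, 1);
 *	}
 *
 * The calls nest via lcb_access_count, so only the first acquire and the
 * last release actually move the selector between the host and the 8051.
 */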
6209
6210/*
6211 * Initialize LCB access variables and state. Called during driver load,
6212 * after most of the initialization is finished.
6213 *
6214 * The DC default is LCB access on for the host. The driver defaults to
6215 * leaving access to the 8051. Assign access now - this constrains the call
6216 * to this routine to be after all LCB set-up is done. In particular, after
 6217	 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6218 */
6219static void init_lcb_access(struct hfi1_devdata *dd)
6220{
6221 dd->lcb_access_count = 0;
6222}
6223
6224/*
 6225	 * Write a response back to an 8051 request.
6226 */
6227static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6228{
6229 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006230 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6231 (u64)return_code <<
6232 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6233 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006234}
6235
6236/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006237 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006238 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006239static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006240{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006241 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006242 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006243 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006244 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006245
6246 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6247 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6248 return; /* no request */
6249
6250 /* zero out COMPLETED so the response is seen */
6251 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6252
6253 /* extract request details */
6254 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6255 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6256 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6257 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6258
6259 switch (type) {
6260 case HREQ_LOAD_CONFIG:
6261 case HREQ_SAVE_CONFIG:
6262 case HREQ_READ_CONFIG:
6263 case HREQ_SET_TX_EQ_ABS:
6264 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006265 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006266 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006267 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006268 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6269 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006270 case HREQ_CONFIG_DONE:
6271 hreq_response(dd, HREQ_SUCCESS, 0);
6272 break;
6273
6274 case HREQ_INTERFACE_TEST:
6275 hreq_response(dd, HREQ_SUCCESS, data);
6276 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006277 default:
6278 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6279 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6280 break;
6281 }
6282}
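
/*
 * The host-request handshake serviced above, in brief: the 8051 sets
 * REQ_NEW in DC_DC8051_CFG_EXT_DEV_1 together with a request type and
 * data field; the host clears COMPLETED in DC_DC8051_CFG_EXT_DEV_0,
 * decodes the request, and answers through hreq_response(), which writes
 * COMPLETED plus a return code and optional response data back to
 * EXT_DEV_0.
 */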
6283
6284static void write_global_credit(struct hfi1_devdata *dd,
6285 u8 vau, u16 total, u16 shared)
6286{
6287 write_csr(dd, SEND_CM_GLOBAL_CREDIT,
Jubin John17fb4f22016-02-14 20:21:52 -08006288 ((u64)total <<
6289 SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6290 ((u64)shared <<
6291 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6292 ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006293}
6294
6295/*
6296 * Set up initial VL15 credits of the remote. Assumes the rest of
 6297	 * the CM credit registers are zero from a previous global or credit reset.
6298 */
6299void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6300{
6301 /* leave shared count at zero for both global and VL15 */
6302 write_global_credit(dd, vau, vl15buf, 0);
6303
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006304 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6305 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006306}
6307
6308/*
6309 * Zero all credit details from the previous connection and
6310 * reset the CM manager's internal counters.
6311 */
6312void reset_link_credits(struct hfi1_devdata *dd)
6313{
6314 int i;
6315
6316 /* remove all previous VL credit limits */
6317 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006318 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006319 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6320 write_global_credit(dd, 0, 0, 0);
6321 /* reset the CM block */
6322 pio_send_control(dd, PSC_CM_RESET);
6323}
6324
6325/* convert a vCU to a CU */
6326static u32 vcu_to_cu(u8 vcu)
6327{
6328 return 1 << vcu;
6329}
6330
6331/* convert a CU to a vCU */
6332static u8 cu_to_vcu(u32 cu)
6333{
6334 return ilog2(cu);
6335}
6336
6337/* convert a vAU to an AU */
6338static u32 vau_to_au(u8 vau)
6339{
6340 return 8 * (1 << vau);
6341}
6342
6343static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6344{
6345 ppd->sm_trap_qp = 0x0;
6346 ppd->sa_qp = 0x1;
6347}
6348
6349/*
6350 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6351 */
6352static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6353{
6354 u64 reg;
6355
6356 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6357 write_csr(dd, DC_LCB_CFG_RUN, 0);
6358 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6359 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006360 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006361 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6362 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6363 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006364 write_csr(dd, DCC_CFG_RESET, reg |
6365 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6366 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006367 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006368 if (!abort) {
6369 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6370 write_csr(dd, DCC_CFG_RESET, reg);
6371 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6372 }
6373}
6374
6375/*
6376 * This routine should be called after the link has been transitioned to
6377 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6378 * reset).
6379 *
6380 * The expectation is that the caller of this routine would have taken
6381 * care of properly transitioning the link into the correct state.
6382 */
6383static void dc_shutdown(struct hfi1_devdata *dd)
6384{
6385 unsigned long flags;
6386
6387 spin_lock_irqsave(&dd->dc8051_lock, flags);
6388 if (dd->dc_shutdown) {
6389 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6390 return;
6391 }
6392 dd->dc_shutdown = 1;
6393 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6394 /* Shutdown the LCB */
6395 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006396 /*
 6397	 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006398	 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006399	 * itself.
6400 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006401 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6402}
6403
Jubin John4d114fd2016-02-14 20:21:43 -08006404/*
6405 * Calling this after the DC has been brought out of reset should not
6406 * do any damage.
6407 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006408static void dc_start(struct hfi1_devdata *dd)
6409{
6410 unsigned long flags;
6411 int ret;
6412
6413 spin_lock_irqsave(&dd->dc8051_lock, flags);
6414 if (!dd->dc_shutdown)
6415 goto done;
6416 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6417 /* Take the 8051 out of reset */
6418 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6419 /* Wait until 8051 is ready */
6420 ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6421 if (ret) {
6422 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006423 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006424 }
6425 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6426 write_csr(dd, DCC_CFG_RESET, 0x10);
6427 /* lcb_shutdown() with abort=1 does not restore these */
6428 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6429 spin_lock_irqsave(&dd->dc8051_lock, flags);
6430 dd->dc_shutdown = 0;
6431done:
6432 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6433}
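
/*
 * dc_shutdown() and dc_start() are meant to be used as a pair:
 * dc_shutdown() leaves the LCB FIFOs and the 8051 in reset (and is a
 * no-op if already shut down), while dc_start() reverses it - take the
 * 8051 out of reset, wait for the firmware to report ready, release the
 * LCB/RX FPE reset, and restore the LCB error enables that
 * lcb_shutdown() saved.
 */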
6434
6435/*
6436 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6437 */
6438static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6439{
6440 u64 rx_radr, tx_radr;
6441 u32 version;
6442
6443 if (dd->icode != ICODE_FPGA_EMULATION)
6444 return;
6445
6446 /*
6447 * These LCB defaults on emulator _s are good, nothing to do here:
6448 * LCB_CFG_TX_FIFOS_RADR
6449 * LCB_CFG_RX_FIFOS_RADR
6450 * LCB_CFG_LN_DCLK
6451 * LCB_CFG_IGNORE_LOST_RCLK
6452 */
6453 if (is_emulator_s(dd))
6454 return;
6455 /* else this is _p */
6456
6457 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006458 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006459 version = 0x2d; /* all B0 use 0x2d or higher settings */
6460
6461 if (version <= 0x12) {
6462 /* release 0x12 and below */
6463
6464 /*
6465 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6466 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6467 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6468 */
6469 rx_radr =
6470 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6471 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6472 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6473 /*
6474 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6475 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6476 */
6477 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6478 } else if (version <= 0x18) {
6479 /* release 0x13 up to 0x18 */
6480 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6481 rx_radr =
6482 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6483 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6484 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6485 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6486 } else if (version == 0x19) {
6487 /* release 0x19 */
6488 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6489 rx_radr =
6490 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6491 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6492 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6493 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6494 } else if (version == 0x1a) {
6495 /* release 0x1a */
6496 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6497 rx_radr =
6498 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6499 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6500 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6501 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6502 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6503 } else {
6504 /* release 0x1b and higher */
6505 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6506 rx_radr =
6507 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6508 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6509 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6510 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6511 }
6512
6513 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6514 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6515 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006516 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006517 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6518}
6519
6520/*
6521 * Handle a SMA idle message
6522 *
6523 * This is a work-queue function outside of the interrupt.
6524 */
6525void handle_sma_message(struct work_struct *work)
6526{
6527 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6528 sma_message_work);
6529 struct hfi1_devdata *dd = ppd->dd;
6530 u64 msg;
6531 int ret;
6532
Jubin John4d114fd2016-02-14 20:21:43 -08006533 /*
6534 * msg is bytes 1-4 of the 40-bit idle message - the command code
6535 * is stripped off
6536 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006537 ret = read_idle_sma(dd, &msg);
6538 if (ret)
6539 return;
6540 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6541 /*
6542 * React to the SMA message. Byte[1] (0 for us) is the command.
6543 */
6544 switch (msg & 0xff) {
6545 case SMA_IDLE_ARM:
6546 /*
6547 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6548 * State Transitions
6549 *
6550 * Only expected in INIT or ARMED, discard otherwise.
6551 */
6552 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6553 ppd->neighbor_normal = 1;
6554 break;
6555 case SMA_IDLE_ACTIVE:
6556 /*
6557 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6558 * State Transitions
6559 *
6560 * Can activate the node. Discard otherwise.
6561 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006562 if (ppd->host_link_state == HLS_UP_ARMED &&
6563 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006564 ppd->neighbor_normal = 1;
6565 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6566 if (ret)
6567 dd_dev_err(
6568 dd,
6569 "%s: received Active SMA idle message, couldn't set link to Active\n",
6570 __func__);
6571 }
6572 break;
6573 default:
6574 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006575 "%s: received unexpected SMA idle message 0x%llx\n",
6576 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006577 break;
6578 }
6579}
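
/*
 * Net effect of the handler above: only two SMA idle commands are acted
 * on. ARM marks the neighbor normal when the link is in INIT or ARMED;
 * ACTIVE, when the link is ARMED and is_active_optimize_enabled is set,
 * marks the neighbor normal and promotes the link to ACTIVE. Anything
 * else is logged and dropped, per OPAv1 table 9-14.
 */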
6580
6581static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6582{
6583 u64 rcvctrl;
6584 unsigned long flags;
6585
6586 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6587 rcvctrl = read_csr(dd, RCV_CTRL);
6588 rcvctrl |= add;
6589 rcvctrl &= ~clear;
6590 write_csr(dd, RCV_CTRL, rcvctrl);
6591 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6592}
6593
6594static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6595{
6596 adjust_rcvctrl(dd, add, 0);
6597}
6598
6599static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6600{
6601 adjust_rcvctrl(dd, 0, clear);
6602}
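
/*
 * add_rcvctrl()/clear_rcvctrl() are thin set/clear wrappers around the
 * locked read-modify-write in adjust_rcvctrl(). For example, the freeze
 * handling below disables the port with
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) and re-enables it
 * with add_rcvctrl() and the same mask.
 */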
6603
6604/*
6605 * Called from all interrupt handlers to start handling an SPC freeze.
6606 */
6607void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6608{
6609 struct hfi1_devdata *dd = ppd->dd;
6610 struct send_context *sc;
6611 int i;
6612
6613 if (flags & FREEZE_SELF)
6614 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6615
6616 /* enter frozen mode */
6617 dd->flags |= HFI1_FROZEN;
6618
6619 /* notify all SDMA engines that they are going into a freeze */
6620 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6621
6622 /* do halt pre-handling on all enabled send contexts */
6623 for (i = 0; i < dd->num_send_contexts; i++) {
6624 sc = dd->send_contexts[i].sc;
6625 if (sc && (sc->flags & SCF_ENABLED))
6626 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6627 }
6628
 6629	/* Send contexts are frozen. Notify user space */
6630 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6631
6632 if (flags & FREEZE_ABORT) {
6633 dd_dev_err(dd,
6634 "Aborted freeze recovery. Please REBOOT system\n");
6635 return;
6636 }
6637 /* queue non-interrupt handler */
6638 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6639}
6640
6641/*
6642 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6643 * depending on the "freeze" parameter.
6644 *
 6645	 * No need to return an error if it times out; our only option
6646 * is to proceed anyway.
6647 */
6648static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6649{
6650 unsigned long timeout;
6651 u64 reg;
6652
6653 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6654 while (1) {
6655 reg = read_csr(dd, CCE_STATUS);
6656 if (freeze) {
6657 /* waiting until all indicators are set */
6658 if ((reg & ALL_FROZE) == ALL_FROZE)
6659 return; /* all done */
6660 } else {
6661 /* waiting until all indicators are clear */
6662 if ((reg & ALL_FROZE) == 0)
6663 return; /* all done */
6664 }
6665
6666 if (time_after(jiffies, timeout)) {
6667 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006668 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6669 freeze ? "" : "un", reg & ALL_FROZE,
6670 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006671 return;
6672 }
6673 usleep_range(80, 120);
6674 }
6675}
6676
6677/*
6678 * Do all freeze handling for the RXE block.
6679 */
6680static void rxe_freeze(struct hfi1_devdata *dd)
6681{
6682 int i;
6683
6684 /* disable port */
6685 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6686
6687 /* disable all receive contexts */
6688 for (i = 0; i < dd->num_rcv_contexts; i++)
6689 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6690}
6691
6692/*
6693 * Unfreeze handling for the RXE block - kernel contexts only.
6694 * This will also enable the port. User contexts will do unfreeze
6695 * handling on a per-context basis as they call into the driver.
6696 *
6697 */
6698static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6699{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006700 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006701 int i;
6702
6703 /* enable all kernel contexts */
Mitko Haralanov566c1572016-02-03 14:32:49 -08006704 for (i = 0; i < dd->n_krcv_queues; i++) {
6705 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6706 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6707 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6708 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6709 hfi1_rcvctrl(dd, rcvmask, i);
6710 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006711
6712 /* enable port */
6713 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6714}
6715
6716/*
6717 * Non-interrupt SPC freeze handling.
6718 *
6719 * This is a work-queue function outside of the triggering interrupt.
6720 */
6721void handle_freeze(struct work_struct *work)
6722{
6723 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6724 freeze_work);
6725 struct hfi1_devdata *dd = ppd->dd;
6726
6727 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006728 wait_for_freeze_status(dd, 1);
6729
6730 /* SPC is now frozen */
6731
6732 /* do send PIO freeze steps */
6733 pio_freeze(dd);
6734
6735 /* do send DMA freeze steps */
6736 sdma_freeze(dd);
6737
6738 /* do send egress freeze steps - nothing to do */
6739
6740 /* do receive freeze steps */
6741 rxe_freeze(dd);
6742
6743 /*
6744 * Unfreeze the hardware - clear the freeze, wait for each
6745 * block's frozen bit to clear, then clear the frozen flag.
6746 */
6747 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6748 wait_for_freeze_status(dd, 0);
6749
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006750 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006751 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6752 wait_for_freeze_status(dd, 1);
6753 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6754 wait_for_freeze_status(dd, 0);
6755 }
6756
6757 /* do send PIO unfreeze steps for kernel contexts */
6758 pio_kernel_unfreeze(dd);
6759
6760 /* do send DMA unfreeze steps */
6761 sdma_unfreeze(dd);
6762
6763 /* do send egress unfreeze steps - nothing to do */
6764
6765 /* do receive unfreeze steps for kernel contexts */
6766 rxe_kernel_unfreeze(dd);
6767
6768 /*
6769 * The unfreeze procedure touches global device registers when
6770 * it disables and re-enables RXE. Mark the device unfrozen
6771 * after all that is done so other parts of the driver waiting
6772 * for the device to unfreeze don't do things out of order.
6773 *
6774 * The above implies that the meaning of HFI1_FROZEN flag is
6775 * "Device has gone into freeze mode and freeze mode handling
6776 * is still in progress."
6777 *
6778 * The flag will be removed when freeze mode processing has
6779 * completed.
6780 */
6781 dd->flags &= ~HFI1_FROZEN;
6782 wake_up(&dd->event_queue);
6783
6784 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006785}
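
/*
 * Freeze recovery ordering, as implemented above: wait for every block
 * to report frozen, run the per-block freeze steps (PIO, SDMA, RXE),
 * clear the freeze and wait for the indicators to clear (A0 parts need
 * one extra freeze/unfreeze cycle here), then run the kernel-context
 * unfreeze steps in the same PIO/SDMA/RXE order. HFI1_FROZEN is cleared
 * and the event queue woken only after all of that, so waiters never
 * see a half-restored device.
 */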
6786
6787/*
6788 * Handle a link up interrupt from the 8051.
6789 *
6790 * This is a work-queue function outside of the interrupt.
6791 */
6792void handle_link_up(struct work_struct *work)
6793{
6794 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006795 link_up_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006796 set_link_state(ppd, HLS_UP_INIT);
6797
6798 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6799 read_ltp_rtt(ppd->dd);
6800 /*
6801 * OPA specifies that certain counters are cleared on a transition
6802 * to link up, so do that.
6803 */
6804 clear_linkup_counters(ppd->dd);
6805 /*
6806 * And (re)set link up default values.
6807 */
6808 set_linkup_defaults(ppd);
6809
6810 /* enforce link speed enabled */
6811 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6812 /* oops - current speed is not enabled, bounce */
6813 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006814 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6815 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006816 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006817 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006818 set_link_state(ppd, HLS_DN_OFFLINE);
6819 start_link(ppd);
6820 }
6821}
6822
Jubin John4d114fd2016-02-14 20:21:43 -08006823/*
6824 * Several pieces of LNI information were cached for SMA in ppd.
6825 * Reset these on link down
6826 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006827static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6828{
6829 ppd->neighbor_guid = 0;
6830 ppd->neighbor_port_number = 0;
6831 ppd->neighbor_type = 0;
6832 ppd->neighbor_fm_security = 0;
6833}
6834
Dean Luickfeb831d2016-04-14 08:31:36 -07006835static const char * const link_down_reason_strs[] = {
6836 [OPA_LINKDOWN_REASON_NONE] = "None",
 6837	[OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6838 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6839 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6840 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6841 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6842 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6843 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6844 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6845 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6846 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6847 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6848 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6849 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6850 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6851 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6852 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6853 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6854 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6855 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6856 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6857 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6858 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6859 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6860 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6861 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6862 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6863 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6864 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6865 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6866 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6867 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6868 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6869 "Excessive buffer overrun",
6870 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6871 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6872 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6873 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6874 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6875 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6876 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6877 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6878 "Local media not installed",
6879 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6880 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6881 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6882 "End to end not installed",
6883 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6884 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6885 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6886 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6887 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6888 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6889};
6890
6891/* return the neighbor link down reason string */
6892static const char *link_down_reason_str(u8 reason)
6893{
6894 const char *str = NULL;
6895
6896 if (reason < ARRAY_SIZE(link_down_reason_strs))
6897 str = link_down_reason_strs[reason];
6898 if (!str)
6899 str = "(invalid)";
6900
6901 return str;
6902}
6903
Mike Marciniszyn77241052015-07-30 15:17:43 -04006904/*
6905 * Handle a link down interrupt from the 8051.
6906 *
6907 * This is a work-queue function outside of the interrupt.
6908 */
6909void handle_link_down(struct work_struct *work)
6910{
6911 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006912 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006913 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006914 link_down_work);
6915 int was_up;
6916 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006917
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006918 if ((ppd->host_link_state &
6919 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6920 ppd->port_type == PORT_TYPE_FIXED)
6921 ppd->offline_disabled_reason =
6922 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6923
6924 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006925 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006926 set_link_state(ppd, HLS_DN_OFFLINE);
6927
Dean Luickfeb831d2016-04-14 08:31:36 -07006928 if (was_up) {
6929 lcl_reason = 0;
6930 /* link down reason is only valid if the link was up */
6931 read_link_down_reason(ppd->dd, &link_down_reason);
6932 switch (link_down_reason) {
6933 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6934 /* the link went down, no idle message reason */
6935 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6936 ldr_str);
6937 break;
6938 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6939 /*
6940 * The neighbor reason is only valid if an idle message
6941 * was received for it.
6942 */
6943 read_planned_down_reason_code(ppd->dd, &neigh_reason);
6944 dd_dev_info(ppd->dd,
6945 "%sNeighbor link down message %d, %s\n",
6946 ldr_str, neigh_reason,
6947 link_down_reason_str(neigh_reason));
6948 break;
6949 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6950 dd_dev_info(ppd->dd,
6951 "%sHost requested link to go offline\n",
6952 ldr_str);
6953 break;
6954 default:
6955 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6956 ldr_str, link_down_reason);
6957 break;
6958 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006959
Dean Luickfeb831d2016-04-14 08:31:36 -07006960 /*
6961 * If no reason, assume peer-initiated but missed
6962 * LinkGoingDown idle flits.
6963 */
6964 if (neigh_reason == 0)
6965 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6966 } else {
6967 /* went down while polling or going up */
6968 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6969 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006970
6971 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6972
Dean Luick015e91f2016-04-14 08:31:42 -07006973 /* inform the SMA when the link transitions from up to down */
6974 if (was_up && ppd->local_link_down_reason.sma == 0 &&
6975 ppd->neigh_link_down_reason.sma == 0) {
6976 ppd->local_link_down_reason.sma =
6977 ppd->local_link_down_reason.latest;
6978 ppd->neigh_link_down_reason.sma =
6979 ppd->neigh_link_down_reason.latest;
6980 }
6981
Mike Marciniszyn77241052015-07-30 15:17:43 -04006982 reset_neighbor_info(ppd);
6983
6984 /* disable the port */
6985 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6986
Jubin John4d114fd2016-02-14 20:21:43 -08006987 /*
6988 * If there is no cable attached, turn the DC off. Otherwise,
6989 * start the link bring up.
6990 */
Dean Luick0db9dec2016-09-06 04:35:20 -07006991 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006992 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07006993 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04006994 start_link(ppd);
6995}
6996
6997void handle_link_bounce(struct work_struct *work)
6998{
6999 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7000 link_bounce_work);
7001
7002 /*
7003 * Only do something if the link is currently up.
7004 */
7005 if (ppd->host_link_state & HLS_UP) {
7006 set_link_state(ppd, HLS_DN_OFFLINE);
7007 start_link(ppd);
7008 } else {
7009 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007010 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007011 }
7012}
7013
7014/*
7015 * Mask conversion: Capability exchange to Port LTP. The capability
7016 * exchange has an implicit 16b CRC that is mandatory.
7017 */
7018static int cap_to_port_ltp(int cap)
7019{
7020 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7021
7022 if (cap & CAP_CRC_14B)
7023 port_ltp |= PORT_LTP_CRC_MODE_14;
7024 if (cap & CAP_CRC_48B)
7025 port_ltp |= PORT_LTP_CRC_MODE_48;
7026 if (cap & CAP_CRC_12B_16B_PER_LANE)
7027 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7028
7029 return port_ltp;
7030}
7031
7032/*
7033 * Convert an OPA Port LTP mask to capability mask
7034 */
7035int port_ltp_to_cap(int port_ltp)
7036{
7037 int cap_mask = 0;
7038
7039 if (port_ltp & PORT_LTP_CRC_MODE_14)
7040 cap_mask |= CAP_CRC_14B;
7041 if (port_ltp & PORT_LTP_CRC_MODE_48)
7042 cap_mask |= CAP_CRC_48B;
7043 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7044 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7045
7046 return cap_mask;
7047}
7048
7049/*
7050 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7051 */
7052static int lcb_to_port_ltp(int lcb_crc)
7053{
7054 int port_ltp = 0;
7055
7056 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7057 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7058 else if (lcb_crc == LCB_CRC_48B)
7059 port_ltp = PORT_LTP_CRC_MODE_48;
7060 else if (lcb_crc == LCB_CRC_14B)
7061 port_ltp = PORT_LTP_CRC_MODE_14;
7062 else
7063 port_ltp = PORT_LTP_CRC_MODE_16;
7064
7065 return port_ltp;
7066}
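
/*
 * Example of the conversions above, following directly from the code: a
 * capability mask of CAP_CRC_14B | CAP_CRC_48B becomes a port LTP mask
 * of PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * since the 16b mode is always implied; port_ltp_to_cap() never reports
 * the mandatory 16b mode back as a capability bit.
 */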
7067
7068/*
7069 * Our neighbor has indicated that we are allowed to act as a fabric
 7070	 * manager, so place the full management partition key in pkey array
 7071	 * position 2 (0-based) (see OPAv1, section 20.2.2.6.8). Note
7072 * that we should already have the limited management partition key in
7073 * array element 1, and also that the port is not yet up when
7074 * add_full_mgmt_pkey() is invoked.
7075 */
7076static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7077{
7078 struct hfi1_devdata *dd = ppd->dd;
7079
Dean Luick87645222015-12-01 15:38:21 -05007080	/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7081 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7082 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7083 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007084 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7085 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007086 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007087}
7088
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007089static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007090{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007091 if (ppd->pkeys[2] != 0) {
7092 ppd->pkeys[2] = 0;
7093 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007094 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007095 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007096}
7097
Mike Marciniszyn77241052015-07-30 15:17:43 -04007098/*
7099 * Convert the given link width to the OPA link width bitmask.
7100 */
7101static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7102{
7103 switch (width) {
7104 case 0:
7105 /*
7106 * Simulator and quick linkup do not set the width.
7107 * Just set it to 4x without complaint.
7108 */
7109 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7110 return OPA_LINK_WIDTH_4X;
7111 return 0; /* no lanes up */
7112 case 1: return OPA_LINK_WIDTH_1X;
7113 case 2: return OPA_LINK_WIDTH_2X;
7114 case 3: return OPA_LINK_WIDTH_3X;
7115 default:
7116 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007117 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007118 /* fall through */
7119 case 4: return OPA_LINK_WIDTH_4X;
7120 }
7121}
7122
7123/*
7124 * Do a population count on the bottom nibble.
7125 */
7126static const u8 bit_counts[16] = {
7127 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7128};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007129
Mike Marciniszyn77241052015-07-30 15:17:43 -04007130static inline u8 nibble_to_count(u8 nibble)
7131{
7132 return bit_counts[nibble & 0xf];
7133}
7134
7135/*
7136 * Read the active lane information from the 8051 registers and return
7137 * their widths.
7138 *
7139 * Active lane information is found in these 8051 registers:
7140 * enable_lane_tx
7141 * enable_lane_rx
7142 */
7143static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7144 u16 *rx_width)
7145{
7146 u16 tx, rx;
7147 u8 enable_lane_rx;
7148 u8 enable_lane_tx;
7149 u8 tx_polarity_inversion;
7150 u8 rx_polarity_inversion;
7151 u8 max_rate;
7152
7153 /* read the active lanes */
7154 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007155 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007156 read_local_lni(dd, &enable_lane_rx);
7157
7158 /* convert to counts */
7159 tx = nibble_to_count(enable_lane_tx);
7160 rx = nibble_to_count(enable_lane_rx);
7161
7162 /*
7163 * Set link_speed_active here, overriding what was set in
7164 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7165 * set the max_rate field in handle_verify_cap until v0.19.
7166 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007167 if ((dd->icode == ICODE_RTL_SILICON) &&
7168 (dd->dc8051_ver < dc8051_ver(0, 19))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007169 /* max_rate: 0 = 12.5G, 1 = 25G */
7170 switch (max_rate) {
7171 case 0:
7172 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7173 break;
7174 default:
7175 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007176 "%s: unexpected max rate %d, using 25Gb\n",
7177 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007178 /* fall through */
7179 case 1:
7180 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7181 break;
7182 }
7183 }
7184
7185 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007186 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7187 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007188 *tx_width = link_width_to_bits(dd, tx);
7189 *rx_width = link_width_to_bits(dd, rx);
7190}
7191
7192/*
7193 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7194 * Valid after the end of VerifyCap and during LinkUp. Does not change
7195 * after link up. I.e. look elsewhere for downgrade information.
7196 *
7197 * Bits are:
7198 * + bits [7:4] contain the number of active transmitters
7199 * + bits [3:0] contain the number of active receivers
7200 * These are numbers 1 through 4 and can be different values if the
7201 * link is asymmetric.
7202 *
7203 * verify_cap_local_fm_link_width[0] retains its original value.
7204 */
7205static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7206 u16 *rx_width)
7207{
7208 u16 widths, tx, rx;
7209 u8 misc_bits, local_flags;
7210 u16 active_tx, active_rx;
7211
7212 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7213 tx = widths >> 12;
7214 rx = (widths >> 8) & 0xf;
7215
7216 *tx_width = link_width_to_bits(dd, tx);
7217 *rx_width = link_width_to_bits(dd, rx);
7218
7219 /* print the active widths */
7220 get_link_widths(dd, &active_tx, &active_rx);
7221}
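
/*
 * Worked example for the decode above (value chosen purely for
 * illustration): widths == 0x2300 yields tx = 2 (bits [15:12]) and
 * rx = 3 (bits [11:8]), which link_width_to_bits() converts to
 * OPA_LINK_WIDTH_2X and OPA_LINK_WIDTH_3X respectively.
 */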
7222
7223/*
7224 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7225 * hardware information when the link first comes up.
7226 *
7227 * The link width is not available until after VerifyCap.AllFramesReceived
7228 * (the trigger for handle_verify_cap), so this is outside that routine
7229 * and should be called when the 8051 signals linkup.
7230 */
7231void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7232{
7233 u16 tx_width, rx_width;
7234
7235 /* get end-of-LNI link widths */
7236 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7237
7238 /* use tx_width as the link is supposed to be symmetric on link up */
7239 ppd->link_width_active = tx_width;
7240 /* link width downgrade active (LWD.A) starts out matching LW.A */
7241 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7242 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7243 /* per OPA spec, on link up LWD.E resets to LWD.S */
7244 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7245	/* cache the active egress rate (units [10^6 bits/sec]) */
7246 ppd->current_egress_rate = active_egress_rate(ppd);
7247}
7248
7249/*
7250 * Handle a verify capabilities interrupt from the 8051.
7251 *
7252 * This is a work-queue function outside of the interrupt.
7253 */
7254void handle_verify_cap(struct work_struct *work)
7255{
7256 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7257 link_vc_work);
7258 struct hfi1_devdata *dd = ppd->dd;
7259 u64 reg;
7260 u8 power_management;
 7261	u8 continuous;
7262 u8 vcu;
7263 u8 vau;
7264 u8 z;
7265 u16 vl15buf;
7266 u16 link_widths;
7267 u16 crc_mask;
7268 u16 crc_val;
7269 u16 device_id;
7270 u16 active_tx, active_rx;
7271 u8 partner_supported_crc;
7272 u8 remote_tx_rate;
7273 u8 device_rev;
7274
7275 set_link_state(ppd, HLS_VERIFY_CAP);
7276
7277 lcb_shutdown(dd, 0);
7278 adjust_lcb_for_fpga_serdes(dd);
7279
7280 /*
7281 * These are now valid:
7282 * remote VerifyCap fields in the general LNI config
7283 * CSR DC8051_STS_REMOTE_GUID
7284 * CSR DC8051_STS_REMOTE_NODE_TYPE
7285 * CSR DC8051_STS_REMOTE_FM_SECURITY
7286 * CSR DC8051_STS_REMOTE_PORT_NO
7287 */
7288
 7289	read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007290 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7291 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007292 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7293 read_remote_device_id(dd, &device_id, &device_rev);
7294 /*
7295 * And the 'MgmtAllowed' information, which is exchanged during
 7296	 * LNI, is also available at this point.
7297 */
7298 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7299 /* print the active widths */
7300 get_link_widths(dd, &active_tx, &active_rx);
7301 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007302 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7303 (int)power_management, (int)continious);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007304 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007305 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7306 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7307 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007308 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007309 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007310 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007311 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007312 /*
7313 * The peer vAU value just read is the peer receiver value. HFI does
7314 * not support a transmit vAU of 0 (AU == 8). We advertised that
7315 * with Z=1 in the fabric capabilities sent to the peer. The peer
7316 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7317 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7318 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7319 * subject to the Z value exception.
7320 */
7321 if (vau == 0)
7322 vau = 1;
7323 set_up_vl15(dd, vau, vl15buf);
7324
7325 /* set up the LCB CRC mode */
7326 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7327
7328 /* order is important: use the lowest bit in common */
7329 if (crc_mask & CAP_CRC_14B)
7330 crc_val = LCB_CRC_14B;
7331 else if (crc_mask & CAP_CRC_48B)
7332 crc_val = LCB_CRC_48B;
7333 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7334 crc_val = LCB_CRC_12B_16B_PER_LANE;
7335 else
7336 crc_val = LCB_CRC_16B;
7337
7338 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7339 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7340 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7341
7342 /* set (14b only) or clear sideband credit */
7343 reg = read_csr(dd, SEND_CM_CTRL);
7344 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7345 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007346 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007347 } else {
7348 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007349 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007350 }
7351
7352 ppd->link_speed_active = 0; /* invalid value */
7353 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7354 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7355 switch (remote_tx_rate) {
7356 case 0:
7357 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7358 break;
7359 case 1:
7360 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7361 break;
7362 }
7363 } else {
7364 /* actual rate is highest bit of the ANDed rates */
7365 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7366
7367 if (rate & 2)
7368 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7369 else if (rate & 1)
7370 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7371 }
7372 if (ppd->link_speed_active == 0) {
7373 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007374 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007375 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7376 }
7377
7378 /*
7379 * Cache the values of the supported, enabled, and active
7380 * LTP CRC modes to return in 'portinfo' queries. But the bit
7381 * flags that are returned in the portinfo query differ from
7382 * what's in the link_crc_mask, crc_sizes, and crc_val
7383 * variables. Convert these here.
7384 */
7385 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7386 /* supported crc modes */
7387 ppd->port_ltp_crc_mode |=
7388 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7389 /* enabled crc modes */
7390 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7391 /* active crc mode */
7392
7393 /* set up the remote credit return table */
7394 assign_remote_cm_au_table(dd, vcu);
7395
7396 /*
7397 * The LCB is reset on entry to handle_verify_cap(), so this must
7398 * be applied on every link up.
7399 *
7400 * Adjust LCB error kill enable to kill the link if
7401 * these RBUF errors are seen:
7402 * REPLAY_BUF_MBE_SMASK
7403 * FLIT_INPUT_BUF_MBE_SMASK
7404 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007405 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007406 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7407 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7408 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7409 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7410 }
7411
7412 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7413 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7414
7415 /* give 8051 access to the LCB CSRs */
7416 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7417 set_8051_lcb_access(dd);
7418
7419 ppd->neighbor_guid =
7420 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7421 ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7422 DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7423 ppd->neighbor_type =
7424 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7425 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7426 ppd->neighbor_fm_security =
7427 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7428 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7429 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007430 "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7431 ppd->neighbor_guid, ppd->neighbor_type,
7432 ppd->mgmt_allowed, ppd->neighbor_fm_security);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007433 if (ppd->mgmt_allowed)
7434 add_full_mgmt_pkey(ppd);
7435
7436 /* tell the 8051 to go to LinkUp */
7437 set_link_state(ppd, HLS_GOING_UP);
7438}
7439
7440/*
7441 * Apply the link width downgrade enabled policy against the current active
7442 * link widths.
7443 *
7444 * Called when the enabled policy changes or the active link widths change.
7445 */
7446void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7447{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007448 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007449 int tries;
7450 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007451 u16 tx, rx;
7452
Dean Luick323fd782015-11-16 21:59:24 -05007453 /* use the hls lock to avoid a race with actual link up */
7454 tries = 0;
7455retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007456 mutex_lock(&ppd->hls_lock);
7457 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007458 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007459 /* still going up..wait and retry */
7460 if (ppd->host_link_state & HLS_GOING_UP) {
7461 if (++tries < 1000) {
7462 mutex_unlock(&ppd->hls_lock);
7463 usleep_range(100, 120); /* arbitrary */
7464 goto retry;
7465 }
7466 dd_dev_err(ppd->dd,
7467 "%s: giving up waiting for link state change\n",
7468 __func__);
7469 }
7470 goto done;
7471 }
7472
7473 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007474
7475 if (refresh_widths) {
7476 get_link_widths(ppd->dd, &tx, &rx);
7477 ppd->link_width_downgrade_tx_active = tx;
7478 ppd->link_width_downgrade_rx_active = rx;
7479 }
7480
Dean Luickf9b56352016-04-14 08:31:30 -07007481 if (ppd->link_width_downgrade_tx_active == 0 ||
7482 ppd->link_width_downgrade_rx_active == 0) {
7483 /* the 8051 reported a dead link as a downgrade */
7484 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7485 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007486 /* downgrade is disabled */
7487
7488 /* bounce if not at starting active width */
7489 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007490 ppd->link_width_downgrade_tx_active) ||
7491 (ppd->link_width_active !=
7492 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007493 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007494 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007495 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007496 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7497 ppd->link_width_active,
7498 ppd->link_width_downgrade_tx_active,
7499 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007500 do_bounce = 1;
7501 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007502 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7503 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007504 /* Tx or Rx is outside the enabled policy */
7505 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007506 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007507 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007508 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7509 lwde, ppd->link_width_downgrade_tx_active,
7510 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007511 do_bounce = 1;
7512 }
7513
Dean Luick323fd782015-11-16 21:59:24 -05007514done:
7515 mutex_unlock(&ppd->hls_lock);
7516
Mike Marciniszyn77241052015-07-30 15:17:43 -04007517 if (do_bounce) {
7518 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007519 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007520 set_link_state(ppd, HLS_DN_OFFLINE);
7521 start_link(ppd);
7522 }
7523}
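/*
 * Summary of the downgrade policy decision implemented above (derived from
 * this code, not from separate hardware documentation):
 *
 *   tx or rx active width == 0            -> 8051 reported a dead link;
 *                                            ignore, a link down follows
 *   lwde == 0 (downgrade disabled)        -> bounce if the active width no
 *                                            longer matches the original
 *   (lwde & tx) == 0 or (lwde & rx) == 0  -> active width outside the
 *                                            enabled policy; bounce
 *
 * A "bounce" sets the link down reason, moves the link to HLS_DN_OFFLINE,
 * then restarts it with start_link().
 */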
7524
7525/*
7526 * Handle a link downgrade interrupt from the 8051.
7527 *
7528 * This is a work-queue function outside of the interrupt.
7529 */
7530void handle_link_downgrade(struct work_struct *work)
7531{
7532 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7533 link_downgrade_work);
7534
7535 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7536 apply_link_downgrade_policy(ppd, 1);
7537}
7538
7539static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7540{
7541 return flag_string(buf, buf_len, flags, dcc_err_flags,
7542 ARRAY_SIZE(dcc_err_flags));
7543}
7544
7545static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7546{
7547 return flag_string(buf, buf_len, flags, lcb_err_flags,
7548 ARRAY_SIZE(lcb_err_flags));
7549}
7550
7551static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7552{
7553 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7554 ARRAY_SIZE(dc8051_err_flags));
7555}
7556
7557static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7558{
7559 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7560 ARRAY_SIZE(dc8051_info_err_flags));
7561}
7562
7563static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7564{
7565 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7566 ARRAY_SIZE(dc8051_info_host_msg_flags));
7567}
7568
7569static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7570{
7571 struct hfi1_pportdata *ppd = dd->pport;
7572 u64 info, err, host_msg;
7573 int queue_link_down = 0;
7574 char buf[96];
7575
7576 /* look at the flags */
7577 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7578 /* 8051 information set by firmware */
7579 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7580 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7581 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7582 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7583 host_msg = (info >>
7584 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7585 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7586
7587 /*
7588 * Handle error flags.
7589 */
7590 if (err & FAILED_LNI) {
7591 /*
7592 * LNI error indications are cleared by the 8051
7593 * only when starting polling. Only pay attention
7594 * to them when in the states that occur during
7595 * LNI.
7596 */
7597 if (ppd->host_link_state
7598 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7599 queue_link_down = 1;
7600 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007601 dc8051_info_err_string(buf,
7602 sizeof(buf),
7603 err &
7604 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007605 }
7606 err &= ~(u64)FAILED_LNI;
7607 }
Dean Luick6d014532015-12-01 15:38:23 -05007608		/* unknown frames can happen during LNI, just count */
7609 if (err & UNKNOWN_FRAME) {
7610 ppd->unknown_frame_count++;
7611 err &= ~(u64)UNKNOWN_FRAME;
7612 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007613 if (err) {
7614 /* report remaining errors, but do not do anything */
7615 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007616 dc8051_info_err_string(buf, sizeof(buf),
7617 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007618 }
7619
7620 /*
7621 * Handle host message flags.
7622 */
7623 if (host_msg & HOST_REQ_DONE) {
7624 /*
7625 * Presently, the driver does a busy wait for
7626 * host requests to complete. This is only an
7627 * informational message.
7628 * NOTE: The 8051 clears the host message
7629 * information *on the next 8051 command*.
7630 * Therefore, when linkup is achieved,
7631 * this flag will still be set.
7632 */
7633 host_msg &= ~(u64)HOST_REQ_DONE;
7634 }
7635 if (host_msg & BC_SMA_MSG) {
7636 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7637 host_msg &= ~(u64)BC_SMA_MSG;
7638 }
7639 if (host_msg & LINKUP_ACHIEVED) {
7640 dd_dev_info(dd, "8051: Link up\n");
7641 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7642 host_msg &= ~(u64)LINKUP_ACHIEVED;
7643 }
7644 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007645 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007646 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7647 }
7648 if (host_msg & VERIFY_CAP_FRAME) {
7649 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7650 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7651 }
7652 if (host_msg & LINK_GOING_DOWN) {
7653 const char *extra = "";
7654 /* no downgrade action needed if going down */
7655 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7656 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7657 extra = " (ignoring downgrade)";
7658 }
7659 dd_dev_info(dd, "8051: Link down%s\n", extra);
7660 queue_link_down = 1;
7661 host_msg &= ~(u64)LINK_GOING_DOWN;
7662 }
7663 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7664 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7665 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7666 }
7667 if (host_msg) {
7668 /* report remaining messages, but do not do anything */
7669 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007670 dc8051_info_host_msg_string(buf,
7671 sizeof(buf),
7672 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007673 }
7674
7675 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7676 }
7677 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7678 /*
7679 * Lost the 8051 heartbeat. If this happens, we
7680 * receive constant interrupts about it. Disable
7681 * the interrupt after the first.
7682 */
7683 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7684 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007685 read_csr(dd, DC_DC8051_ERR_EN) &
7686 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007687
7688 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7689 }
7690 if (reg) {
7691 /* report the error, but do not do anything */
7692 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007693 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007694 }
7695
7696 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007697 /*
7698 * if the link is already going down or disabled, do not
7699 * queue another
7700 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007701 if ((ppd->host_link_state &
7702 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7703 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007704 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007705 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007706 } else {
7707 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7708 }
7709 }
7710}
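/*
 * Note on the decode above: DC_DC8051_DBG_ERR_INFO_SET_BY_8051 packs two
 * fields, split out with the ERROR_* and HOST_MSG_* shift/mask pairs.  Each
 * recognized bit is cleared from err/host_msg (and from reg) as it is
 * handled, so anything still set at the end is unrecognized and is only
 * logged, never acted on.
 */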
7711
7712static const char * const fm_config_txt[] = {
7713[0] =
7714 "BadHeadDist: Distance violation between two head flits",
7715[1] =
7716 "BadTailDist: Distance violation between two tail flits",
7717[2] =
7718 "BadCtrlDist: Distance violation between two credit control flits",
7719[3] =
7720 "BadCrdAck: Credits return for unsupported VL",
7721[4] =
7722 "UnsupportedVLMarker: Received VL Marker",
7723[5] =
7724 "BadPreempt: Exceeded the preemption nesting level",
7725[6] =
7726 "BadControlFlit: Received unsupported control flit",
7727/* no 7 */
7728[8] =
7729 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7730};
7731
7732static const char * const port_rcv_txt[] = {
7733[1] =
7734 "BadPktLen: Illegal PktLen",
7735[2] =
7736 "PktLenTooLong: Packet longer than PktLen",
7737[3] =
7738 "PktLenTooShort: Packet shorter than PktLen",
7739[4] =
7740 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7741[5] =
7742 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7743[6] =
7744 "BadL2: Illegal L2 opcode",
7745[7] =
7746 "BadSC: Unsupported SC",
7747[9] =
7748 "BadRC: Illegal RC",
7749[11] =
7750 "PreemptError: Preempting with same VL",
7751[12] =
7752 "PreemptVL15: Preempting a VL15 packet",
7753};
7754
7755#define OPA_LDR_FMCONFIG_OFFSET 16
7756#define OPA_LDR_PORTRCV_OFFSET 0
7757static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7758{
7759 u64 info, hdr0, hdr1;
7760 const char *extra;
7761 char buf[96];
7762 struct hfi1_pportdata *ppd = dd->pport;
7763 u8 lcl_reason = 0;
7764 int do_bounce = 0;
7765
7766 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7767 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7768 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7769 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7770 /* set status bit */
7771 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7772 }
7773 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7774 }
7775
7776 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7777 struct hfi1_pportdata *ppd = dd->pport;
7778 /* this counter saturates at (2^32) - 1 */
7779 if (ppd->link_downed < (u32)UINT_MAX)
7780 ppd->link_downed++;
7781 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7782 }
7783
7784 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7785 u8 reason_valid = 1;
7786
7787 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7788 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7789 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7790 /* set status bit */
7791 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7792 }
7793 switch (info) {
7794 case 0:
7795 case 1:
7796 case 2:
7797 case 3:
7798 case 4:
7799 case 5:
7800 case 6:
7801 extra = fm_config_txt[info];
7802 break;
7803 case 8:
7804 extra = fm_config_txt[info];
7805 if (ppd->port_error_action &
7806 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7807 do_bounce = 1;
7808 /*
7809 * lcl_reason cannot be derived from info
7810 * for this error
7811 */
7812 lcl_reason =
7813 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7814 }
7815 break;
7816 default:
7817 reason_valid = 0;
7818 snprintf(buf, sizeof(buf), "reserved%lld", info);
7819 extra = buf;
7820 break;
7821 }
7822
7823 if (reason_valid && !do_bounce) {
7824 do_bounce = ppd->port_error_action &
7825 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7826 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7827 }
7828
7829 /* just report this */
7830 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7831 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7832 }
7833
7834 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7835 u8 reason_valid = 1;
7836
7837 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7838 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7839 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7840 if (!(dd->err_info_rcvport.status_and_code &
7841 OPA_EI_STATUS_SMASK)) {
7842 dd->err_info_rcvport.status_and_code =
7843 info & OPA_EI_CODE_SMASK;
7844 /* set status bit */
7845 dd->err_info_rcvport.status_and_code |=
7846 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007847 /*
7848 * save first 2 flits in the packet that caused
7849 * the error
7850 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007851 dd->err_info_rcvport.packet_flit1 = hdr0;
7852 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007853 }
7854 switch (info) {
7855 case 1:
7856 case 2:
7857 case 3:
7858 case 4:
7859 case 5:
7860 case 6:
7861 case 7:
7862 case 9:
7863 case 11:
7864 case 12:
7865 extra = port_rcv_txt[info];
7866 break;
7867 default:
7868 reason_valid = 0;
7869 snprintf(buf, sizeof(buf), "reserved%lld", info);
7870 extra = buf;
7871 break;
7872 }
7873
7874 if (reason_valid && !do_bounce) {
7875 do_bounce = ppd->port_error_action &
7876 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7877 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7878 }
7879
7880 /* just report this */
7881 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7882 dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007883 hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007884
7885 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7886 }
7887
7888 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7889 /* informative only */
7890 dd_dev_info(dd, "8051 access to LCB blocked\n");
7891 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7892 }
7893 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7894 /* informative only */
7895 dd_dev_info(dd, "host access to LCB blocked\n");
7896 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7897 }
7898
7899 /* report any remaining errors */
7900 if (reg)
7901 dd_dev_info(dd, "DCC Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007902 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007903
7904 if (lcl_reason == 0)
7905 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7906
7907 if (do_bounce) {
7908 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7909 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7910 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7911 }
7912}
7913
7914static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7915{
7916 char buf[96];
7917
7918 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007919 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007920}
7921
7922/*
7923 * CCE block DC interrupt. Source is < 8.
7924 */
7925static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7926{
7927 const struct err_reg_info *eri = &dc_errs[source];
7928
7929 if (eri->handler) {
7930 interrupt_clear_down(dd, 0, eri);
7931 } else if (source == 3 /* dc_lbm_int */) {
7932 /*
7933 * This indicates that a parity error has occurred on the
7934 * address/control lines presented to the LBM. The error
7935 * is a single pulse, there is no associated error flag,
7936 * and it is non-maskable. This is because if a parity
 7937		 * error occurs on the request, the request is dropped.
7938 * This should never occur, but it is nice to know if it
7939 * ever does.
7940 */
7941 dd_dev_err(dd, "Parity error in DC LBM block\n");
7942 } else {
7943 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7944 }
7945}
7946
7947/*
7948 * TX block send credit interrupt. Source is < 160.
7949 */
7950static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7951{
7952 sc_group_release_update(dd, source);
7953}
7954
7955/*
7956 * TX block SDMA interrupt. Source is < 48.
7957 *
7958 * SDMA interrupts are grouped by type:
7959 *
7960 * 0 - N-1 = SDma
7961 * N - 2N-1 = SDmaProgress
7962 * 2N - 3N-1 = SDmaIdle
7963 */
7964static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7965{
7966 /* what interrupt */
7967 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
7968 /* which engine */
7969 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7970
7971#ifdef CONFIG_SDMA_VERBOSITY
7972 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7973 slashstrip(__FILE__), __LINE__, __func__);
7974 sdma_dumpstate(&dd->per_sdma[which]);
7975#endif
7976
7977 if (likely(what < 3 && which < dd->num_sdma)) {
7978 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7979 } else {
7980 /* should not happen */
7981 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7982 }
7983}
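/*
 * Worked example of the decode above, assuming a hypothetical
 * TXE_NUM_SDMA_ENGINES of 16: source 21 gives what = 21 / 16 = 1
 * (SDmaProgress) and which = 21 % 16 = 5, so the interrupt is forwarded to
 * engine 5 with status bit (1ull << 21) set.
 */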
7984
7985/*
7986 * RX block receive available interrupt. Source is < 160.
7987 */
7988static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7989{
7990 struct hfi1_ctxtdata *rcd;
7991 char *err_detail;
7992
7993 if (likely(source < dd->num_rcv_contexts)) {
7994 rcd = dd->rcd[source];
7995 if (rcd) {
7996 if (source < dd->first_user_ctxt)
Dean Luickf4f30031c2015-10-26 10:28:44 -04007997 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007998 else
7999 handle_user_interrupt(rcd);
8000 return; /* OK */
8001 }
8002 /* received an interrupt, but no rcd */
8003 err_detail = "dataless";
8004 } else {
8005 /* received an interrupt, but are not using that context */
8006 err_detail = "out of range";
8007 }
8008 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008009 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008010}
8011
8012/*
8013 * RX block receive urgent interrupt. Source is < 160.
8014 */
8015static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8016{
8017 struct hfi1_ctxtdata *rcd;
8018 char *err_detail;
8019
8020 if (likely(source < dd->num_rcv_contexts)) {
8021 rcd = dd->rcd[source];
8022 if (rcd) {
8023 /* only pay attention to user urgent interrupts */
8024 if (source >= dd->first_user_ctxt)
8025 handle_user_interrupt(rcd);
8026 return; /* OK */
8027 }
8028 /* received an interrupt, but no rcd */
8029 err_detail = "dataless";
8030 } else {
8031 /* received an interrupt, but are not using that context */
8032 err_detail = "out of range";
8033 }
8034 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008035 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008036}
8037
8038/*
8039 * Reserved range interrupt. Should not be called in normal operation.
8040 */
8041static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8042{
8043 char name[64];
8044
8045 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008046 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008047}
8048
8049static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008050/*
8051 * start end
8052 * name func interrupt func
8053 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008054{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8055 is_misc_err_name, is_misc_err_int },
8056{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8057 is_sdma_eng_err_name, is_sdma_eng_err_int },
8058{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8059 is_sendctxt_err_name, is_sendctxt_err_int },
8060{ IS_SDMA_START, IS_SDMA_END,
8061 is_sdma_eng_name, is_sdma_eng_int },
8062{ IS_VARIOUS_START, IS_VARIOUS_END,
8063 is_various_name, is_various_int },
8064{ IS_DC_START, IS_DC_END,
8065 is_dc_name, is_dc_int },
8066{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8067 is_rcv_avail_name, is_rcv_avail_int },
8068{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8069 is_rcv_urgent_name, is_rcv_urgent_int },
8070{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8071 is_send_credit_name, is_send_credit_int},
8072{ IS_RESERVED_START, IS_RESERVED_END,
8073 is_reserved_name, is_reserved_int},
8074};
8075
8076/*
8077 * Interrupt source interrupt - called when the given source has an interrupt.
8078 * Source is a bit index into an array of 64-bit integers.
8079 */
8080static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8081{
8082 const struct is_table *entry;
8083
8084 /* avoids a double compare by walking the table in-order */
8085 for (entry = &is_table[0]; entry->is_name; entry++) {
8086 if (source < entry->end) {
8087 trace_hfi1_interrupt(dd, entry, source);
8088 entry->is_int(dd, source - entry->start);
8089 return;
8090 }
8091 }
8092 /* fell off the end */
8093 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8094}
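/*
 * The single compare per entry above works because is_table[] is sorted by
 * range: the first entry whose 'end' exceeds 'source' owns it.  The handler
 * receives the source renumbered relative to its range start
 * (source - entry->start).
 */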
8095
8096/*
8097 * General interrupt handler. This is able to correctly handle
8098 * all interrupts in case INTx is used.
8099 */
8100static irqreturn_t general_interrupt(int irq, void *data)
8101{
8102 struct hfi1_devdata *dd = data;
8103 u64 regs[CCE_NUM_INT_CSRS];
8104 u32 bit;
8105 int i;
8106
8107 this_cpu_inc(*dd->int_counter);
8108
8109 /* phase 1: scan and clear all handled interrupts */
8110 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8111 if (dd->gi_mask[i] == 0) {
8112 regs[i] = 0; /* used later */
8113 continue;
8114 }
8115 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8116 dd->gi_mask[i];
8117 /* only clear if anything is set */
8118 if (regs[i])
8119 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8120 }
8121
8122 /* phase 2: call the appropriate handler */
8123 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008124 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008125 is_interrupt(dd, bit);
8126 }
8127
8128 return IRQ_HANDLED;
8129}
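/*
 * Illustration of the two-phase handling above: set bit b in the combined
 * status image corresponds to CSR index b / 64, bit b % 64.  For a
 * hypothetical bit 130, the status was read from CCE_INT_STATUS + 8 * 2 and
 * the dispatch is is_interrupt(dd, 130).
 */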
8130
8131static irqreturn_t sdma_interrupt(int irq, void *data)
8132{
8133 struct sdma_engine *sde = data;
8134 struct hfi1_devdata *dd = sde->dd;
8135 u64 status;
8136
8137#ifdef CONFIG_SDMA_VERBOSITY
8138 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8139 slashstrip(__FILE__), __LINE__, __func__);
8140 sdma_dumpstate(sde);
8141#endif
8142
8143 this_cpu_inc(*dd->int_counter);
8144
8145 /* This read_csr is really bad in the hot path */
8146 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008147 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8148 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008149 if (likely(status)) {
8150 /* clear the interrupt(s) */
8151 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008152 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8153 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008154
8155 /* handle the interrupt(s) */
8156 sdma_engine_interrupt(sde, status);
8157 } else
8158 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008159 sde->this_idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008160
8161 return IRQ_HANDLED;
8162}
8163
8164/*
Dean Luickecd42f82016-02-03 14:35:14 -08008165 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8166 * to ensure that the write completed.  This does NOT guarantee that
8167 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008168 */
8169static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8170{
8171 struct hfi1_devdata *dd = rcd->dd;
8172 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8173
8174 mmiowb(); /* make sure everything before is written */
8175 write_csr(dd, addr, rcd->imask);
8176 /* force the above write on the chip and get a value back */
8177 (void)read_csr(dd, addr);
8178}
8179
8180/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008181void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008182{
8183 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8184}
8185
Dean Luickecd42f82016-02-03 14:35:14 -08008186/*
8187 * Return non-zero if a packet is present.
8188 *
8189 * This routine is called when rechecking for packets after the RcvAvail
8190 * interrupt has been cleared down. First, do a quick check of memory for
8191 * a packet present. If not found, use an expensive CSR read of the context
8192 * tail to determine the actual tail. The CSR read is necessary because there
8193 * is no method to push pending DMAs to memory other than an interrupt and we
8194 * are trying to determine if we need to force an interrupt.
8195 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008196static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8197{
Dean Luickecd42f82016-02-03 14:35:14 -08008198 u32 tail;
8199 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008200
Dean Luickecd42f82016-02-03 14:35:14 -08008201 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8202 present = (rcd->seq_cnt ==
8203 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8204 else /* is RDMA rtail */
8205 present = (rcd->head != get_rcvhdrtail(rcd));
8206
8207 if (present)
8208 return 1;
8209
 8210	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8211 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8212 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008213}
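/*
 * The quick memory check above depends on the receive mode: without
 * DMA_RTAIL, the next expected RHF sequence number (rcd->seq_cnt) is
 * compared against the sequence number of the next receive header in
 * memory; with DMA_RTAIL, the software head is compared to the DMA'd tail
 * copy.  Only when neither shows a packet is the expensive RCV_HDR_TAIL CSR
 * read used as the authoritative answer.
 */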
8214
8215/*
8216 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8217 * This routine will try to handle packets immediately (latency), but if
 8218 * it finds too many, it will invoke the thread handler (bandwidth).  The
Jubin John16733b82016-02-14 20:20:58 -08008219 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008220 * invoked) is finished. The intent is to avoid extra interrupts while we
8221 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008222 */
8223static irqreturn_t receive_context_interrupt(int irq, void *data)
8224{
8225 struct hfi1_ctxtdata *rcd = data;
8226 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008227 int disposition;
8228 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008229
8230 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8231 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008232 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008233
Dean Luickf4f30031c2015-10-26 10:28:44 -04008234 /* receive interrupt remains blocked while processing packets */
8235 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008236
Dean Luickf4f30031c2015-10-26 10:28:44 -04008237 /*
8238 * Too many packets were seen while processing packets in this
8239 * IRQ handler. Invoke the handler thread. The receive interrupt
8240 * remains blocked.
8241 */
8242 if (disposition == RCV_PKT_LIMIT)
8243 return IRQ_WAKE_THREAD;
8244
8245 /*
8246 * The packet processor detected no more packets. Clear the receive
 8247	 * interrupt and recheck for a packet that may have arrived
8248 * after the previous check and interrupt clear. If a packet arrived,
8249 * force another interrupt.
8250 */
8251 clear_recv_intr(rcd);
8252 present = check_packet_present(rcd);
8253 if (present)
8254 force_recv_intr(rcd);
8255
8256 return IRQ_HANDLED;
8257}
8258
8259/*
8260 * Receive packet thread handler. This expects to be invoked with the
8261 * receive interrupt still blocked.
8262 */
8263static irqreturn_t receive_context_thread(int irq, void *data)
8264{
8265 struct hfi1_ctxtdata *rcd = data;
8266 int present;
8267
8268 /* receive interrupt is still blocked from the IRQ handler */
8269 (void)rcd->do_interrupt(rcd, 1);
8270
8271 /*
8272 * The packet processor will only return if it detected no more
8273 * packets. Hold IRQs here so we can safely clear the interrupt and
8274 * recheck for a packet that may have arrived after the previous
8275 * check and the interrupt clear. If a packet arrived, force another
8276 * interrupt.
8277 */
8278 local_irq_disable();
8279 clear_recv_intr(rcd);
8280 present = check_packet_present(rcd);
8281 if (present)
8282 force_recv_intr(rcd);
8283 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008284
8285 return IRQ_HANDLED;
8286}
8287
8288/* ========================================================================= */
8289
8290u32 read_physical_state(struct hfi1_devdata *dd)
8291{
8292 u64 reg;
8293
8294 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8295 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8296 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8297}
8298
Jim Snowfb9036d2016-01-11 18:32:21 -05008299u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008300{
8301 u64 reg;
8302
8303 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8304 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8305 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8306}
8307
8308static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8309{
8310 u64 reg;
8311
8312 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8313 /* clear current state, set new state */
8314 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8315 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8316 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8317}
8318
8319/*
8320 * Use the 8051 to read a LCB CSR.
8321 */
8322static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8323{
8324 u32 regno;
8325 int ret;
8326
8327 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8328 if (acquire_lcb_access(dd, 0) == 0) {
8329 *data = read_csr(dd, addr);
8330 release_lcb_access(dd, 0);
8331 return 0;
8332 }
8333 return -EBUSY;
8334 }
8335
8336 /* register is an index of LCB registers: (offset - base) / 8 */
8337 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8338 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8339 if (ret != HCMD_SUCCESS)
8340 return -EBUSY;
8341 return 0;
8342}
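/*
 * LCB CSRs are 8 bytes apart, so the 8051 register index used above is the
 * byte offset from DC_LCB_CFG_RUN divided by 8; e.g. a (hypothetical) CSR
 * 0x18 bytes past DC_LCB_CFG_RUN is index 3.
 */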
8343
8344/*
8345 * Read an LCB CSR. Access may not be in host control, so check.
8346 * Return 0 on success, -EBUSY on failure.
8347 */
8348int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8349{
8350 struct hfi1_pportdata *ppd = dd->pport;
8351
8352 /* if up, go through the 8051 for the value */
8353 if (ppd->host_link_state & HLS_UP)
8354 return read_lcb_via_8051(dd, addr, data);
8355 /* if going up or down, no access */
8356 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8357 return -EBUSY;
8358 /* otherwise, host has access */
8359 *data = read_csr(dd, addr);
8360 return 0;
8361}
8362
8363/*
8364 * Use the 8051 to write a LCB CSR.
8365 */
8366static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8367{
Dean Luick3bf40d62015-11-06 20:07:04 -05008368 u32 regno;
8369 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008370
Dean Luick3bf40d62015-11-06 20:07:04 -05008371 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8372 (dd->dc8051_ver < dc8051_ver(0, 20))) {
8373 if (acquire_lcb_access(dd, 0) == 0) {
8374 write_csr(dd, addr, data);
8375 release_lcb_access(dd, 0);
8376 return 0;
8377 }
8378 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008379 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008380
8381 /* register is an index of LCB registers: (offset - base) / 8 */
8382 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8383 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8384 if (ret != HCMD_SUCCESS)
8385 return -EBUSY;
8386 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008387}
8388
8389/*
8390 * Write an LCB CSR. Access may not be in host control, so check.
8391 * Return 0 on success, -EBUSY on failure.
8392 */
8393int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8394{
8395 struct hfi1_pportdata *ppd = dd->pport;
8396
8397 /* if up, go through the 8051 for the value */
8398 if (ppd->host_link_state & HLS_UP)
8399 return write_lcb_via_8051(dd, addr, data);
8400 /* if going up or down, no access */
8401 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8402 return -EBUSY;
8403 /* otherwise, host has access */
8404 write_csr(dd, addr, data);
8405 return 0;
8406}
8407
8408/*
8409 * Returns:
8410 * < 0 = Linux error, not able to get access
8411 * > 0 = 8051 command RETURN_CODE
8412 */
8413static int do_8051_command(
8414 struct hfi1_devdata *dd,
8415 u32 type,
8416 u64 in_data,
8417 u64 *out_data)
8418{
8419 u64 reg, completed;
8420 int return_code;
8421 unsigned long flags;
8422 unsigned long timeout;
8423
8424 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8425
8426 /*
8427 * Alternative to holding the lock for a long time:
8428 * - keep busy wait - have other users bounce off
8429 */
8430 spin_lock_irqsave(&dd->dc8051_lock, flags);
8431
8432 /* We can't send any commands to the 8051 if it's in reset */
8433 if (dd->dc_shutdown) {
8434 return_code = -ENODEV;
8435 goto fail;
8436 }
8437
8438 /*
8439 * If an 8051 host command timed out previously, then the 8051 is
8440 * stuck.
8441 *
8442 * On first timeout, attempt to reset and restart the entire DC
8443 * block (including 8051). (Is this too big of a hammer?)
8444 *
8445 * If the 8051 times out a second time, the reset did not bring it
8446 * back to healthy life. In that case, fail any subsequent commands.
8447 */
8448 if (dd->dc8051_timed_out) {
8449 if (dd->dc8051_timed_out > 1) {
8450 dd_dev_err(dd,
8451 "Previous 8051 host command timed out, skipping command %u\n",
8452 type);
8453 return_code = -ENXIO;
8454 goto fail;
8455 }
8456 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8457 dc_shutdown(dd);
8458 dc_start(dd);
8459 spin_lock_irqsave(&dd->dc8051_lock, flags);
8460 }
8461
8462 /*
8463 * If there is no timeout, then the 8051 command interface is
8464 * waiting for a command.
8465 */
8466
8467 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008468	 * When writing an LCB CSR, out_data contains the full value
 8469	 * to be written, while in_data contains the relative LCB
 8470	 * address in 7:0. Do the work here, rather than the caller,
 8471	 * of distributing the write data to where it needs to go:
8472 *
8473 * Write data
8474 * 39:00 -> in_data[47:8]
8475 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8476 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8477 */
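	/*
	 * Hypothetical worked example of the distribution below: with
	 * *out_data == 0xBEEF42123456789Aull, write-data bits 39:0
	 * (0x123456789A) are merged into in_data[47:8], bits 47:40 (0x42) go
	 * to DC8051_CFG_EXT_DEV_0.RETURN_CODE, and bits 63:48 (0xBEEF) go to
	 * DC8051_CFG_EXT_DEV_0.RSP_DATA.
	 */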
8478 if (type == HCMD_WRITE_LCB_CSR) {
8479 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8480 reg = ((((*out_data) >> 40) & 0xff) <<
8481 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8482 | ((((*out_data) >> 48) & 0xffff) <<
8483 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8484 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8485 }
8486
8487 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008488 * Do two writes: the first to stabilize the type and req_data, the
8489 * second to activate.
8490 */
8491 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8492 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8493 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8494 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8495 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8496 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8497 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8498
8499 /* wait for completion, alternate: interrupt */
8500 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8501 while (1) {
8502 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8503 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8504 if (completed)
8505 break;
8506 if (time_after(jiffies, timeout)) {
8507 dd->dc8051_timed_out++;
8508 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8509 if (out_data)
8510 *out_data = 0;
8511 return_code = -ETIMEDOUT;
8512 goto fail;
8513 }
8514 udelay(2);
8515 }
8516
8517 if (out_data) {
8518 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8519 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8520 if (type == HCMD_READ_LCB_CSR) {
8521 /* top 16 bits are in a different register */
8522 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8523 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8524 << (48
8525 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8526 }
8527 }
8528 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8529 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8530 dd->dc8051_timed_out = 0;
8531 /*
8532 * Clear command for next user.
8533 */
8534 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8535
8536fail:
8537 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8538
8539 return return_code;
8540}
8541
8542static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8543{
8544 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8545}
8546
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008547int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8548 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008549{
8550 u64 data;
8551 int ret;
8552
8553 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8554 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8555 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8556 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8557 if (ret != HCMD_SUCCESS) {
8558 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008559 "load 8051 config: field id %d, lane %d, err %d\n",
8560 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008561 }
8562 return ret;
8563}
8564
8565/*
8566 * Read the 8051 firmware "registers". Use the RAM directly. Always
8567 * set the result, even on error.
8568 * Return 0 on success, -errno on failure
8569 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008570int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8571 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008572{
8573 u64 big_data;
8574 u32 addr;
8575 int ret;
8576
8577 /* address start depends on the lane_id */
8578 if (lane_id < 4)
8579 addr = (4 * NUM_GENERAL_FIELDS)
8580 + (lane_id * 4 * NUM_LANE_FIELDS);
8581 else
8582 addr = 0;
8583 addr += field_id * 4;
8584
8585 /* read is in 8-byte chunks, hardware will truncate the address down */
8586 ret = read_8051_data(dd, addr, 8, &big_data);
8587
8588 if (ret == 0) {
8589 /* extract the 4 bytes we want */
8590 if (addr & 0x4)
8591 *result = (u32)(big_data >> 32);
8592 else
8593 *result = (u32)big_data;
8594 } else {
8595 *result = 0;
8596 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008597 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008598 }
8599
8600 return ret;
8601}
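/*
 * Worked example of the addressing above: lane fields start after
 * NUM_GENERAL_FIELDS general fields, 4 bytes per field, so lane_id 2 and
 * field_id 1 read from byte address
 * 4 * NUM_GENERAL_FIELDS + 2 * 4 * NUM_LANE_FIELDS + 4.  The RAM is read 8
 * bytes at a time and bit 2 of the address picks the upper or lower 32 bits
 * of that chunk.
 */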
8602
8603static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8604 u8 continuous)
8605{
8606 u32 frame;
8607
8608 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8609 | power_management << POWER_MANAGEMENT_SHIFT;
8610 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8611 GENERAL_CONFIG, frame);
8612}
8613
8614static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8615 u16 vl15buf, u8 crc_sizes)
8616{
8617 u32 frame;
8618
8619 frame = (u32)vau << VAU_SHIFT
8620 | (u32)z << Z_SHIFT
8621 | (u32)vcu << VCU_SHIFT
8622 | (u32)vl15buf << VL15BUF_SHIFT
8623 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8624 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8625 GENERAL_CONFIG, frame);
8626}
8627
8628static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8629 u8 *flag_bits, u16 *link_widths)
8630{
8631 u32 frame;
8632
8633 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008634 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008635 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8636 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8637 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8638}
8639
8640static int write_vc_local_link_width(struct hfi1_devdata *dd,
8641 u8 misc_bits,
8642 u8 flag_bits,
8643 u16 link_widths)
8644{
8645 u32 frame;
8646
8647 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8648 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8649 | (u32)link_widths << LINK_WIDTH_SHIFT;
8650 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8651 frame);
8652}
8653
8654static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8655 u8 device_rev)
8656{
8657 u32 frame;
8658
8659 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8660 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8661 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8662}
8663
8664static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8665 u8 *device_rev)
8666{
8667 u32 frame;
8668
8669 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8670 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8671 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8672 & REMOTE_DEVICE_REV_MASK;
8673}
8674
8675void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8676{
8677 u32 frame;
8678
8679 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8680 *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8681 *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8682}
8683
8684static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8685 u8 *continuous)
8686{
8687 u32 frame;
8688
8689 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8690 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8691 & POWER_MANAGEMENT_MASK;
8692 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8693 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8694}
8695
8696static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8697 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8698{
8699 u32 frame;
8700
8701 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8702 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8703 *z = (frame >> Z_SHIFT) & Z_MASK;
8704 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8705 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8706 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8707}
8708
8709static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8710 u8 *remote_tx_rate,
8711 u16 *link_widths)
8712{
8713 u32 frame;
8714
8715 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008716 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008717 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8718 & REMOTE_TX_RATE_MASK;
8719 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8720}
8721
8722static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8723{
8724 u32 frame;
8725
8726 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8727 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8728}
8729
8730static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8731{
8732 u32 frame;
8733
8734 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8735 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8736}
8737
8738static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8739{
8740 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8741}
8742
8743static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8744{
8745 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8746}
8747
8748void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8749{
8750 u32 frame;
8751 int ret;
8752
8753 *link_quality = 0;
8754 if (dd->pport->host_link_state & HLS_UP) {
8755 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008756 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008757 if (ret == 0)
8758 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8759 & LINK_QUALITY_MASK;
8760 }
8761}
8762
8763static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8764{
8765 u32 frame;
8766
8767 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8768 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8769}
8770
Dean Luickfeb831d2016-04-14 08:31:36 -07008771static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8772{
8773 u32 frame;
8774
8775 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8776 *ldr = (frame & 0xff);
8777}
8778
Mike Marciniszyn77241052015-07-30 15:17:43 -04008779static int read_tx_settings(struct hfi1_devdata *dd,
8780 u8 *enable_lane_tx,
8781 u8 *tx_polarity_inversion,
8782 u8 *rx_polarity_inversion,
8783 u8 *max_rate)
8784{
8785 u32 frame;
8786 int ret;
8787
8788 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8789 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8790 & ENABLE_LANE_TX_MASK;
8791 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8792 & TX_POLARITY_INVERSION_MASK;
8793 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8794 & RX_POLARITY_INVERSION_MASK;
8795 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8796 return ret;
8797}
8798
8799static int write_tx_settings(struct hfi1_devdata *dd,
8800 u8 enable_lane_tx,
8801 u8 tx_polarity_inversion,
8802 u8 rx_polarity_inversion,
8803 u8 max_rate)
8804{
8805 u32 frame;
8806
8807 /* no need to mask, all variable sizes match field widths */
8808 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8809 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8810 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8811 | max_rate << MAX_RATE_SHIFT;
8812 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8813}
8814
Mike Marciniszyn77241052015-07-30 15:17:43 -04008815/*
8816 * Read an idle LCB message.
8817 *
8818 * Returns 0 on success, -EINVAL on error
8819 */
8820static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8821{
8822 int ret;
8823
Jubin John17fb4f22016-02-14 20:21:52 -08008824 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008825 if (ret != HCMD_SUCCESS) {
8826 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008827 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008828 return -EINVAL;
8829 }
8830 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8831 /* return only the payload as we already know the type */
8832 *data_out >>= IDLE_PAYLOAD_SHIFT;
8833 return 0;
8834}
8835
8836/*
8837 * Read an idle SMA message. To be done in response to a notification from
8838 * the 8051.
8839 *
8840 * Returns 0 on success, -EINVAL on error
8841 */
8842static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8843{
Jubin John17fb4f22016-02-14 20:21:52 -08008844 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8845 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008846}
8847
8848/*
8849 * Send an idle LCB message.
8850 *
8851 * Returns 0 on success, -EINVAL on error
8852 */
8853static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8854{
8855 int ret;
8856
8857 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8858 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8859 if (ret != HCMD_SUCCESS) {
8860 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008861 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008862 return -EINVAL;
8863 }
8864 return 0;
8865}
8866
8867/*
8868 * Send an idle SMA message.
8869 *
8870 * Returns 0 on success, -EINVAL on error
8871 */
8872int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8873{
8874 u64 data;
8875
Jubin John17fb4f22016-02-14 20:21:52 -08008876 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8877 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008878 return send_idle_message(dd, data);
8879}
8880
8881/*
8882 * Initialize the LCB then do a quick link up. This may or may not be
8883 * in loopback.
8884 *
8885 * return 0 on success, -errno on error
8886 */
8887static int do_quick_linkup(struct hfi1_devdata *dd)
8888{
8889 u64 reg;
8890 unsigned long timeout;
8891 int ret;
8892
8893 lcb_shutdown(dd, 0);
8894
8895 if (loopback) {
8896 /* LCB_CFG_LOOPBACK.VAL = 2 */
8897 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8898 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08008899 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008900 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8901 }
8902
8903 /* start the LCBs */
8904 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8905 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8906
8907 /* simulator only loopback steps */
8908 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8909 /* LCB_CFG_RUN.EN = 1 */
8910 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08008911 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008912
8913 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8914 timeout = jiffies + msecs_to_jiffies(10);
8915 while (1) {
Jubin John17fb4f22016-02-14 20:21:52 -08008916 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008917 if (reg)
8918 break;
8919 if (time_after(jiffies, timeout)) {
8920 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008921 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008922 return -ETIMEDOUT;
8923 }
8924 udelay(2);
8925 }
8926
8927 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08008928 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008929 }
8930
8931 if (!loopback) {
8932 /*
8933 * When doing quick linkup and not in loopback, both
8934 * sides must be done with LCB set-up before either
8935 * starts the quick linkup. Put a delay here so that
8936 * both sides can be started and have a chance to be
8937 * done with LCB set up before resuming.
8938 */
8939 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008940 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008941 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08008942 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04008943 }
8944
8945 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8946 set_8051_lcb_access(dd);
8947
8948 /*
8949 * State "quick" LinkUp request sets the physical link state to
8950 * LinkUp without a verify capability sequence.
8951 * This state is in simulator v37 and later.
8952 */
8953 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8954 if (ret != HCMD_SUCCESS) {
8955 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008956 "%s: set physical link state to quick LinkUp failed with return %d\n",
8957 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008958
8959 set_host_lcb_access(dd);
8960 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8961
8962 if (ret >= 0)
8963 ret = -EINVAL;
8964 return ret;
8965 }
8966
8967 return 0; /* success */
8968}
8969
8970/*
8971 * Set the SerDes to internal loopback mode.
8972 * Returns 0 on success, -errno on error.
8973 */
8974static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8975{
8976 int ret;
8977
8978 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8979 if (ret == HCMD_SUCCESS)
8980 return 0;
8981 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008982 "Set physical link state to SerDes Loopback failed with return %d\n",
8983 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008984 if (ret >= 0)
8985 ret = -EINVAL;
8986 return ret;
8987}
8988
8989/*
8990 * Do all special steps to set up loopback.
8991 */
8992static int init_loopback(struct hfi1_devdata *dd)
8993{
8994 dd_dev_info(dd, "Entering loopback mode\n");
8995
8996 /* all loopbacks should disable self GUID check */
8997 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08008998 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008999
9000 /*
9001 * The simulator has only one loopback option - LCB. Switch
9002 * to that option, which includes quick link up.
9003 *
9004 * Accept all valid loopback values.
9005 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009006 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9007 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9008 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009009 loopback = LOOPBACK_LCB;
9010 quick_linkup = 1;
9011 return 0;
9012 }
9013
9014 /* handle serdes loopback */
9015 if (loopback == LOOPBACK_SERDES) {
 9016		/* internal serdes loopback needs quick linkup on RTL */
9017 if (dd->icode == ICODE_RTL_SILICON)
9018 quick_linkup = 1;
9019 return set_serdes_loopback_mode(dd);
9020 }
9021
9022 /* LCB loopback - handled at poll time */
9023 if (loopback == LOOPBACK_LCB) {
9024 quick_linkup = 1; /* LCB is always quick linkup */
9025
9026 /* not supported in emulation due to emulation RTL changes */
9027 if (dd->icode == ICODE_FPGA_EMULATION) {
9028 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009029 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009030 return -EINVAL;
9031 }
9032 return 0;
9033 }
9034
9035 /* external cable loopback requires no extra steps */
9036 if (loopback == LOOPBACK_CABLE)
9037 return 0;
9038
9039 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9040 return -EINVAL;
9041}
9042
9043/*
9044 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9045 * used in the Verify Capability link width attribute.
9046 */
9047static u16 opa_to_vc_link_widths(u16 opa_widths)
9048{
9049 int i;
9050 u16 result = 0;
9051
9052 static const struct link_bits {
9053 u16 from;
9054 u16 to;
9055 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009056 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9057 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9058 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9059 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009060 };
9061
9062 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9063 if (opa_widths & opa_link_xlate[i].from)
9064 result |= opa_link_xlate[i].to;
9065 }
9066 return result;
9067}
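/*
 * Example of the translation above: an opa_widths value with
 * OPA_LINK_WIDTH_1X and OPA_LINK_WIDTH_4X set yields 0x9 (bit 0 for 1X,
 * bit 3 for 4X), the encoding used in the verify capability link width
 * frames.
 */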
9068
9069/*
9070 * Set link attributes before moving to polling.
9071 */
9072static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9073{
9074 struct hfi1_devdata *dd = ppd->dd;
9075 u8 enable_lane_tx;
9076 u8 tx_polarity_inversion;
9077 u8 rx_polarity_inversion;
9078 int ret;
9079
9080 /* reset our fabric serdes to clear any lingering problems */
9081 fabric_serdes_reset(dd);
9082
9083 /* set the local tx rate - need to read-modify-write */
9084 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009085 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009086 if (ret)
9087 goto set_local_link_attributes_fail;
9088
9089 if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9090 /* set the tx rate to the fastest enabled */
9091 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9092 ppd->local_tx_rate = 1;
9093 else
9094 ppd->local_tx_rate = 0;
9095 } else {
9096 /* set the tx rate to all enabled */
9097 ppd->local_tx_rate = 0;
9098 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9099 ppd->local_tx_rate |= 2;
9100 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9101 ppd->local_tx_rate |= 1;
9102 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009103
9104 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009105 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009106 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009107 if (ret != HCMD_SUCCESS)
9108 goto set_local_link_attributes_fail;
9109
9110 /*
9111 * DC supports continuous updates.
9112 */
Jubin John17fb4f22016-02-14 20:21:52 -08009113 ret = write_vc_local_phy(dd,
9114 0 /* no power management */,
9115 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009116 if (ret != HCMD_SUCCESS)
9117 goto set_local_link_attributes_fail;
9118
9119 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9120 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9121 ppd->port_crc_mode_enabled);
9122 if (ret != HCMD_SUCCESS)
9123 goto set_local_link_attributes_fail;
9124
9125 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009126 opa_to_vc_link_widths(
9127 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009128 if (ret != HCMD_SUCCESS)
9129 goto set_local_link_attributes_fail;
9130
9131 /* let peer know who we are */
9132 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9133 if (ret == HCMD_SUCCESS)
9134 return 0;
9135
9136set_local_link_attributes_fail:
9137 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009138 "Failed to set local link attributes, return 0x%x\n",
9139 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009140 return ret;
9141}
9142
9143/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009144 * Call this to start the link.
 9145 * Does nothing if the link is disabled.
 9146 * Returns 0 if the link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009147 */
9148int start_link(struct hfi1_pportdata *ppd)
9149{
Dean Luick0db9dec2016-09-06 04:35:20 -07009150 /*
9151 * Tune the SerDes to a ballpark setting for optimal signal and bit
9152 * error rate. Needs to be done before starting the link.
9153 */
9154 tune_serdes(ppd);
9155
Mike Marciniszyn77241052015-07-30 15:17:43 -04009156 if (!ppd->link_enabled) {
9157 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009158 "%s: stopping link start because link is disabled\n",
9159 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009160 return 0;
9161 }
9162 if (!ppd->driver_link_ready) {
9163 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009164 "%s: stopping link start because driver is not ready\n",
9165 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009166 return 0;
9167 }
9168
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009169 /*
9170 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9171 * pkey table can be configured properly if the HFI unit is connected
 9172 * to a switch port with MgmtAllowed=NO
9173 */
9174 clear_full_mgmt_pkey(ppd);
9175
Easwar Hariharan623bba22016-04-12 11:25:57 -07009176 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009177}
9178
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009179static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9180{
9181 struct hfi1_devdata *dd = ppd->dd;
9182 u64 mask;
9183 unsigned long timeout;
9184
9185 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009186 * Some QSFP cables have a quirk that asserts the IntN line as a side
9187 * effect of power up on plug-in. We ignore this false positive
9188 * interrupt until the module has finished powering up by waiting for
9189 * a minimum timeout of the module inrush initialization time of
9190 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9191 * module have stabilized.
9192 */
9193 msleep(500);
9194
9195 /*
9196 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009197 */
9198 timeout = jiffies + msecs_to_jiffies(2000);
9199 while (1) {
9200 mask = read_csr(dd, dd->hfi1_id ?
9201 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009202 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009203 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009204 if (time_after(jiffies, timeout)) {
9205 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9206 __func__);
9207 break;
9208 }
9209 udelay(2);
9210 }
9211}
9212
9213static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9214{
9215 struct hfi1_devdata *dd = ppd->dd;
9216 u64 mask;
9217
9218 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009219 if (enable) {
9220 /*
9221 * Clear the status register to avoid an immediate interrupt
9222 * when we re-enable the IntN pin
9223 */
9224 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9225 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009226 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009227 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009228 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009229 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009230 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9231}
9232
9233void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009234{
9235 struct hfi1_devdata *dd = ppd->dd;
9236 u64 mask, qsfp_mask;
9237
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009238 /* Disable INT_N from triggering QSFP interrupts */
9239 set_qsfp_int_n(ppd, 0);
9240
9241 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009242 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009243
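	/*
	 * RESET_N is active low: clearing the output bit below asserts
	 * reset; setting it again after the delay releases the module.
	 */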
9244 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009245 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009246 qsfp_mask &= ~mask;
9247 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009248 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009249
9250 udelay(10);
9251
9252 qsfp_mask |= mask;
9253 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009254 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009255
9256 wait_for_qsfp_init(ppd);
9257
9258 /*
9259 * Allow INT_N to trigger the QSFP interrupt to watch
9260 * for alarms and warnings
9261 */
9262 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009263}
9264
9265static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9266 u8 *qsfp_interrupt_status)
9267{
9268 struct hfi1_devdata *dd = ppd->dd;
9269
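	/*
	 * qsfp_interrupt_status[] was read starting at QSFP byte 6, so
	 * index 0 holds the temperature flags, index 1 the supply voltage
	 * flags, and indexes 3-8 the per-channel RX power, TX bias, and
	 * TX power flags checked below.
	 */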
9270 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009271 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
 9272		dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9273 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009274
9275 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009276 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9277 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9278 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009279
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009280 /*
9281 * The remaining alarms/warnings don't matter if the link is down.
9282 */
9283 if (ppd->host_link_state & HLS_DOWN)
9284 return 0;
9285
Mike Marciniszyn77241052015-07-30 15:17:43 -04009286 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009287 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9288 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9289 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009290
9291 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009292 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9293 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9294 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009295
9296 /* Byte 2 is vendor specific */
9297
9298 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009299 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9300 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9301 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009302
9303 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009304 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9305 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9306 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009307
9308 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009309 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9310 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9311 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009312
9313 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009314 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9315 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9316 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009317
9318 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009319 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9320 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9321 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009322
9323 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009324 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9325 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9326 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009327
9328 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009329 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9330 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9331 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009332
9333 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009334 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9335 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9336 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009337
9338 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009339 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9340 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9341 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009342
9343 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009344 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9345 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9346 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009347
9348 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009349 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9350 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9351 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009352
9353 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009354 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9355 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9356 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009357
9358 /* Bytes 9-10 and 11-12 are reserved */
9359 /* Bytes 13-15 are vendor specific */
9360
9361 return 0;
9362}
9363
Easwar Hariharan623bba22016-04-12 11:25:57 -07009364/* This routine is only scheduled if the QSFP module present signal is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009365void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009366{
9367 struct qsfp_data *qd;
9368 struct hfi1_pportdata *ppd;
9369 struct hfi1_devdata *dd;
9370
9371 qd = container_of(work, struct qsfp_data, qsfp_work);
9372 ppd = qd->ppd;
9373 dd = ppd->dd;
9374
9375 /* Sanity check */
9376 if (!qsfp_mod_present(ppd))
9377 return;
9378
9379 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009380 * Turn DC back on after cable has been re-inserted. Up until
9381 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009382 */
9383 dc_start(dd);
9384
9385 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009386 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009387
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009388 wait_for_qsfp_init(ppd);
9389
9390 /*
9391 * Allow INT_N to trigger the QSFP interrupt to watch
9392 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009393 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009394 set_qsfp_int_n(ppd, 1);
9395
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009396 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009397 }
9398
9399 if (qd->check_interrupt_flags) {
9400 u8 qsfp_interrupt_status[16] = {0,};
9401
Dean Luick765a6fa2016-03-05 08:50:06 -08009402 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9403 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009404 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009405 "%s: Failed to read status of QSFP module\n",
9406 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009407 } else {
9408 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009409
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009410 handle_qsfp_error_conditions(
9411 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009412 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9413 ppd->qsfp_info.check_interrupt_flags = 0;
9414 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009415 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009416 }
9417 }
9418}
9419
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009420static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009421{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009422 struct hfi1_pportdata *ppd = dd->pport;
9423 u64 qsfp_mask, cce_int_mask;
9424 const int qsfp1_int_smask = QSFP1_INT % 64;
9425 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009426
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009427 /*
9428 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9429 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9430 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9431 * the index of the appropriate CSR in the CCEIntMask CSR array
9432 */
9433 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9434 (8 * (QSFP1_INT / 64)));
9435 if (dd->hfi1_id) {
9436 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9437 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9438 cce_int_mask);
9439 } else {
9440 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9441 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9442 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009443 }
9444
Mike Marciniszyn77241052015-07-30 15:17:43 -04009445 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9446 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009447 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9448 qsfp_mask);
9449 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9450 qsfp_mask);
9451
9452 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009453
9454 /* Handle active low nature of INT_N and MODPRST_N pins */
9455 if (qsfp_mod_present(ppd))
9456 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9457 write_csr(dd,
9458 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9459 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009460}
9461
Dean Luickbbdeb332015-12-01 15:38:15 -05009462/*
 9463 * Do a one-time initialization of the LCB block.
9464 */
9465static void init_lcb(struct hfi1_devdata *dd)
9466{
Dean Luicka59329d2016-02-03 14:32:31 -08009467 /* simulator does not correctly handle LCB cclk loopback, skip */
9468 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9469 return;
9470
Dean Luickbbdeb332015-12-01 15:38:15 -05009471 /* the DC has been reset earlier in the driver load */
9472
9473 /* set LCB for cclk loopback on the port */
9474 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9475 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9476 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9477 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9478 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9479 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9480 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9481}
9482
Dean Luick673b9752016-08-31 07:24:33 -07009483/*
9484 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9485 * on error.
9486 */
9487static int test_qsfp_read(struct hfi1_pportdata *ppd)
9488{
9489 int ret;
9490 u8 status;
9491
9492 /* report success if not a QSFP */
9493 if (ppd->port_type != PORT_TYPE_QSFP)
9494 return 0;
9495
9496 /* read byte 2, the status byte */
9497 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9498 if (ret < 0)
9499 return ret;
9500 if (ret != 1)
9501 return -EIO;
9502
9503 return 0; /* success */
9504}
9505
9506/*
9507 * Values for QSFP retry.
9508 *
 9509 * Give up after 10s (20 x 500ms). The overall timeout was determined
 9510 * empirically, from experience on a large cluster.
9511 */
9512#define MAX_QSFP_RETRIES 20
9513#define QSFP_RETRY_WAIT 500 /* msec */
9514
9515/*
9516 * Try a QSFP read. If it fails, schedule a retry for later.
9517 * Called on first link activation after driver load.
9518 */
9519static void try_start_link(struct hfi1_pportdata *ppd)
9520{
9521 if (test_qsfp_read(ppd)) {
9522 /* read failed */
9523 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9524 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9525 return;
9526 }
9527 dd_dev_info(ppd->dd,
9528 "QSFP not responding, waiting and retrying %d\n",
9529 (int)ppd->qsfp_retry_count);
9530 ppd->qsfp_retry_count++;
9531 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9532 msecs_to_jiffies(QSFP_RETRY_WAIT));
9533 return;
9534 }
9535 ppd->qsfp_retry_count = 0;
9536
Dean Luick673b9752016-08-31 07:24:33 -07009537 start_link(ppd);
9538}
9539
9540/*
9541 * Workqueue function to start the link after a delay.
9542 */
9543void handle_start_link(struct work_struct *work)
9544{
9545 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9546 start_link_work.work);
9547 try_start_link(ppd);
9548}
9549
Mike Marciniszyn77241052015-07-30 15:17:43 -04009550int bringup_serdes(struct hfi1_pportdata *ppd)
9551{
9552 struct hfi1_devdata *dd = ppd->dd;
9553 u64 guid;
9554 int ret;
9555
9556 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9557 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9558
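	/* if no port GUID has been set, derive one from the device base GUID */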
9559 guid = ppd->guid;
9560 if (!guid) {
9561 if (dd->base_guid)
9562 guid = dd->base_guid + ppd->port - 1;
9563 ppd->guid = guid;
9564 }
9565
Mike Marciniszyn77241052015-07-30 15:17:43 -04009566 /* Set linkinit_reason on power up per OPA spec */
9567 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9568
Dean Luickbbdeb332015-12-01 15:38:15 -05009569 /* one-time init of the LCB */
9570 init_lcb(dd);
9571
Mike Marciniszyn77241052015-07-30 15:17:43 -04009572 if (loopback) {
9573 ret = init_loopback(dd);
9574 if (ret < 0)
9575 return ret;
9576 }
9577
Easwar Hariharan9775a992016-05-12 10:22:39 -07009578 get_port_type(ppd);
9579 if (ppd->port_type == PORT_TYPE_QSFP) {
9580 set_qsfp_int_n(ppd, 0);
9581 wait_for_qsfp_init(ppd);
9582 set_qsfp_int_n(ppd, 1);
9583 }
9584
Dean Luick673b9752016-08-31 07:24:33 -07009585 try_start_link(ppd);
9586 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009587}
9588
9589void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9590{
9591 struct hfi1_devdata *dd = ppd->dd;
9592
9593 /*
 9594	 * Shut down the link and keep it down. First, clear the flag that
 9595	 * says the driver wants to allow the link to be up (driver_link_ready).
 9596	 * Then make sure the link is not automatically restarted
 9597	 * (link_enabled). Cancel any pending restart, and finally
 9598	 * go offline.
9599 */
9600 ppd->driver_link_ready = 0;
9601 ppd->link_enabled = 0;
9602
Dean Luick673b9752016-08-31 07:24:33 -07009603 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9604 flush_delayed_work(&ppd->start_link_work);
9605 cancel_delayed_work_sync(&ppd->start_link_work);
9606
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009607 ppd->offline_disabled_reason =
9608 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009609 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009610 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009611 set_link_state(ppd, HLS_DN_OFFLINE);
9612
9613 /* disable the port */
9614 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9615}
9616
9617static inline int init_cpu_counters(struct hfi1_devdata *dd)
9618{
9619 struct hfi1_pportdata *ppd;
9620 int i;
9621
9622 ppd = (struct hfi1_pportdata *)(dd + 1);
9623 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009624 ppd->ibport_data.rvp.rc_acks = NULL;
9625 ppd->ibport_data.rvp.rc_qacks = NULL;
9626 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9627 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9628 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9629 if (!ppd->ibport_data.rvp.rc_acks ||
9630 !ppd->ibport_data.rvp.rc_delayed_comp ||
9631 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009632 return -ENOMEM;
9633 }
9634
9635 return 0;
9636}
9637
9638static const char * const pt_names[] = {
9639 "expected",
9640 "eager",
9641 "invalid"
9642};
9643
9644static const char *pt_name(u32 type)
9645{
9646 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9647}
9648
9649/*
9650 * index is the index into the receive array
9651 */
9652void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9653 u32 type, unsigned long pa, u16 order)
9654{
9655 u64 reg;
9656 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9657 (dd->kregbase + RCV_ARRAY));
9658
9659 if (!(dd->flags & HFI1_PRESENT))
9660 goto done;
9661
9662 if (type == PT_INVALID) {
9663 pa = 0;
9664 } else if (type > PT_INVALID) {
9665 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009666 "unexpected receive array type %u for index %u, not handled\n",
9667 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009668 goto done;
9669 }
9670
9671 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9672 pt_name(type), index, pa, (unsigned long)order);
9673
9674#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
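	/*
	 * Build the RcvArray entry: write enable, the buffer size order,
	 * and the 4KB-aligned physical address of the buffer.
	 */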
9675 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9676 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9677 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9678 << RCV_ARRAY_RT_ADDR_SHIFT;
9679 writeq(reg, base + (index * 8));
9680
9681 if (type == PT_EAGER)
9682 /*
9683 * Eager entries are written one-by-one so we have to push them
9684 * after we write the entry.
9685 */
9686 flush_wc();
9687done:
9688 return;
9689}
9690
9691void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9692{
9693 struct hfi1_devdata *dd = rcd->dd;
9694 u32 i;
9695
9696 /* this could be optimized */
9697 for (i = rcd->eager_base; i < rcd->eager_base +
9698 rcd->egrbufs.alloced; i++)
9699 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9700
9701 for (i = rcd->expected_base;
9702 i < rcd->expected_base + rcd->expected_count; i++)
9703 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9704}
9705
Mike Marciniszyn261a4352016-09-06 04:35:05 -07009706struct ib_header *hfi1_get_msgheader(
9707 struct hfi1_devdata *dd, __le32 *rhf_addr)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009708{
9709 u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9710
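	/*
	 * rhf_addr - dd->rhf_offset points at the start of the receive
	 * header queue entry; offset (in DWs) locates the header within it.
	 */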
Mike Marciniszyn261a4352016-09-06 04:35:05 -07009711 return (struct ib_header *)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009712 (rhf_addr - dd->rhf_offset + offset);
9713}
9714
9715static const char * const ib_cfg_name_strings[] = {
9716 "HFI1_IB_CFG_LIDLMC",
9717 "HFI1_IB_CFG_LWID_DG_ENB",
9718 "HFI1_IB_CFG_LWID_ENB",
9719 "HFI1_IB_CFG_LWID",
9720 "HFI1_IB_CFG_SPD_ENB",
9721 "HFI1_IB_CFG_SPD",
9722 "HFI1_IB_CFG_RXPOL_ENB",
9723 "HFI1_IB_CFG_LREV_ENB",
9724 "HFI1_IB_CFG_LINKLATENCY",
9725 "HFI1_IB_CFG_HRTBT",
9726 "HFI1_IB_CFG_OP_VLS",
9727 "HFI1_IB_CFG_VL_HIGH_CAP",
9728 "HFI1_IB_CFG_VL_LOW_CAP",
9729 "HFI1_IB_CFG_OVERRUN_THRESH",
9730 "HFI1_IB_CFG_PHYERR_THRESH",
9731 "HFI1_IB_CFG_LINKDEFAULT",
9732 "HFI1_IB_CFG_PKEYS",
9733 "HFI1_IB_CFG_MTU",
9734 "HFI1_IB_CFG_LSTATE",
9735 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9736 "HFI1_IB_CFG_PMA_TICKS",
9737 "HFI1_IB_CFG_PORT"
9738};
9739
9740static const char *ib_cfg_name(int which)
9741{
9742 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9743 return "invalid";
9744 return ib_cfg_name_strings[which];
9745}
9746
9747int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9748{
9749 struct hfi1_devdata *dd = ppd->dd;
9750 int val = 0;
9751
9752 switch (which) {
9753 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9754 val = ppd->link_width_enabled;
9755 break;
9756 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9757 val = ppd->link_width_active;
9758 break;
9759 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9760 val = ppd->link_speed_enabled;
9761 break;
9762 case HFI1_IB_CFG_SPD: /* current Link speed */
9763 val = ppd->link_speed_active;
9764 break;
9765
9766 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9767 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9768 case HFI1_IB_CFG_LINKLATENCY:
9769 goto unimplemented;
9770
9771 case HFI1_IB_CFG_OP_VLS:
9772 val = ppd->vls_operational;
9773 break;
9774 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9775 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9776 break;
9777 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9778 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9779 break;
9780 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9781 val = ppd->overrun_threshold;
9782 break;
9783 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9784 val = ppd->phy_error_threshold;
9785 break;
9786 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9787 val = dd->link_default;
9788 break;
9789
9790 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9791 case HFI1_IB_CFG_PMA_TICKS:
9792 default:
9793unimplemented:
9794 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9795 dd_dev_info(
9796 dd,
9797 "%s: which %s: not implemented\n",
9798 __func__,
9799 ib_cfg_name(which));
9800 break;
9801 }
9802
9803 return val;
9804}
9805
9806/*
9807 * The largest MAD packet size.
9808 */
9809#define MAX_MAD_PACKET 2048
9810
9811/*
9812 * Return the maximum header bytes that can go on the _wire_
 9813 * for this device. This count includes the ICRC, which is
 9814 * not part of the packet held in memory but is appended
 9815 * by the HW.
9816 * This is dependent on the device's receive header entry size.
9817 * HFI allows this to be set per-receive context, but the
9818 * driver presently enforces a global value.
9819 */
9820u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9821{
9822 /*
9823 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9824 * the Receive Header Entry Size minus the PBC (or RHF) size
9825 * plus one DW for the ICRC appended by HW.
9826 *
9827 * dd->rcd[0].rcvhdrqentsize is in DW.
9828 * We use rcd[0] as all context will have the same value. Also,
9829 * the first kernel context would have been allocated by now so
9830 * we are guaranteed a valid value.
9831 */
9832 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9833}
9834
9835/*
9836 * Set Send Length
9837 * @ppd - per port data
9838 *
9839 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9840 * registers compare against LRH.PktLen, so use the max bytes included
9841 * in the LRH.
9842 *
9843 * This routine changes all VL values except VL15, which it maintains at
9844 * the same value.
9845 */
9846static void set_send_length(struct hfi1_pportdata *ppd)
9847{
9848 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009849 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9850 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009851 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9852 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9853 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009854 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009855 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009856
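	/*
	 * VLs 0-3 are packed into SEND_LEN_CHECK0 (len1); VLs 4-7 are
	 * packed into SEND_LEN_CHECK1 (len2), which already holds VL15.
	 */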
9857 for (i = 0; i < ppd->vls_supported; i++) {
9858 if (dd->vld[i].mtu > maxvlmtu)
9859 maxvlmtu = dd->vld[i].mtu;
9860 if (i <= 3)
9861 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9862 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9863 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9864 else
9865 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9866 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9867 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9868 }
9869 write_csr(dd, SEND_LEN_CHECK0, len1);
9870 write_csr(dd, SEND_LEN_CHECK1, len2);
9871 /* adjust kernel credit return thresholds based on new MTUs */
9872 /* all kernel receive contexts have the same hdrqentsize */
9873 for (i = 0; i < ppd->vls_supported; i++) {
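		/* use the smaller of the 50%-full and MTU-based thresholds */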
Jianxin Xiong44306f12016-04-12 11:30:28 -07009874 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9875 sc_mtu_to_threshold(dd->vld[i].sc,
9876 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009877 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009878 for (j = 0; j < INIT_SC_PER_VL; j++)
9879 sc_set_cr_threshold(
9880 pio_select_send_context_vl(dd, j, i),
9881 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009882 }
9883 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9884 sc_mtu_to_threshold(dd->vld[15].sc,
9885 dd->vld[15].mtu,
9886 dd->rcd[0]->rcvhdrqentsize));
9887 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009888
9889 /* Adjust maximum MTU for the port in DC */
9890 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9891 (ilog2(maxvlmtu >> 8) + 1);
9892 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9893 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9894 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9895 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9896 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9897}
9898
9899static void set_lidlmc(struct hfi1_pportdata *ppd)
9900{
9901 int i;
9902 u64 sreg = 0;
9903 struct hfi1_devdata *dd = ppd->dd;
9904 u32 mask = ~((1U << ppd->lmc) - 1);
9905 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9906
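	/*
	 * mask clears the low LMC bits of the LID so that any LID within
	 * the LMC range passes the DLID/SLID checks programmed below.
	 */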
Mike Marciniszyn77241052015-07-30 15:17:43 -04009907 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9908 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9909 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -08009910 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -04009911 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9912 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9913 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9914
9915 /*
9916 * Iterate over all the send contexts and set their SLID check
9917 */
9918 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9919 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9920 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9921 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9922
9923 for (i = 0; i < dd->chip_send_contexts; i++) {
9924 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9925 i, (u32)sreg);
9926 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9927 }
9928
9929 /* Now we have to do the same thing for the sdma engines */
9930 sdma_update_lmc(dd, mask, ppd->lid);
9931}
9932
9933static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9934{
9935 unsigned long timeout;
9936 u32 curr_state;
9937
9938 timeout = jiffies + msecs_to_jiffies(msecs);
9939 while (1) {
9940 curr_state = read_physical_state(dd);
9941 if (curr_state == state)
9942 break;
9943 if (time_after(jiffies, timeout)) {
9944 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009945 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9946 state, curr_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009947 return -ETIMEDOUT;
9948 }
9949 usleep_range(1950, 2050); /* sleep 2ms-ish */
9950 }
9951
9952 return 0;
9953}
9954
Dean Luick6854c692016-07-25 13:38:56 -07009955static const char *state_completed_string(u32 completed)
9956{
9957 static const char * const state_completed[] = {
9958 "EstablishComm",
9959 "OptimizeEQ",
9960 "VerifyCap"
9961 };
9962
9963 if (completed < ARRAY_SIZE(state_completed))
9964 return state_completed[completed];
9965
9966 return "unknown";
9967}
9968
9969static const char all_lanes_dead_timeout_expired[] =
9970 "All lanes were inactive – was the interconnect media removed?";
9971static const char tx_out_of_policy[] =
9972 "Passing lanes on local port do not meet the local link width policy";
9973static const char no_state_complete[] =
9974 "State timeout occurred before link partner completed the state";
9975static const char * const state_complete_reasons[] = {
9976 [0x00] = "Reason unknown",
9977 [0x01] = "Link was halted by driver, refer to LinkDownReason",
9978 [0x02] = "Link partner reported failure",
9979 [0x10] = "Unable to achieve frame sync on any lane",
9980 [0x11] =
9981 "Unable to find a common bit rate with the link partner",
9982 [0x12] =
9983 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
9984 [0x13] =
9985 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
9986 [0x14] = no_state_complete,
9987 [0x15] =
9988 "State timeout occurred before link partner identified equalization presets",
9989 [0x16] =
9990 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
9991 [0x17] = tx_out_of_policy,
9992 [0x20] = all_lanes_dead_timeout_expired,
9993 [0x21] =
9994 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
9995 [0x22] = no_state_complete,
9996 [0x23] =
9997 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
9998 [0x24] = tx_out_of_policy,
9999 [0x30] = all_lanes_dead_timeout_expired,
10000 [0x31] =
10001 "State timeout occurred waiting for host to process received frames",
10002 [0x32] = no_state_complete,
10003 [0x33] =
10004 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10005 [0x34] = tx_out_of_policy,
10006};
10007
10008static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10009 u32 code)
10010{
10011 const char *str = NULL;
10012
10013 if (code < ARRAY_SIZE(state_complete_reasons))
10014 str = state_complete_reasons[code];
10015
10016 if (str)
10017 return str;
10018 return "Reserved";
10019}
10020
10021/* describe the given last state complete frame */
10022static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10023 const char *prefix)
10024{
10025 struct hfi1_devdata *dd = ppd->dd;
10026 u32 success;
10027 u32 state;
10028 u32 reason;
10029 u32 lanes;
10030
10031 /*
10032 * Decode frame:
10033 * [ 0: 0] - success
10034 * [ 3: 1] - state
10035 * [ 7: 4] - next state timeout
10036 * [15: 8] - reason code
10037 * [31:16] - lanes
10038 */
10039 success = frame & 0x1;
10040 state = (frame >> 1) & 0x7;
10041 reason = (frame >> 8) & 0xff;
10042 lanes = (frame >> 16) & 0xffff;
10043
10044 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10045 prefix, frame);
 10046	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10047 state_completed_string(state), state);
10048 dd_dev_err(dd, " state successfully completed: %s\n",
10049 success ? "yes" : "no");
10050 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10051 reason, state_complete_reason_code_string(ppd, reason));
10052 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10053}
10054
10055/*
10056 * Read the last state complete frames and explain them. This routine
10057 * expects to be called if the link went down during link negotiation
10058 * and initialization (LNI). That is, anywhere between polling and link up.
10059 */
10060static void check_lni_states(struct hfi1_pportdata *ppd)
10061{
10062 u32 last_local_state;
10063 u32 last_remote_state;
10064
10065 read_last_local_state(ppd->dd, &last_local_state);
10066 read_last_remote_state(ppd->dd, &last_remote_state);
10067
10068 /*
10069 * Don't report anything if there is nothing to report. A value of
10070 * 0 means the link was taken down while polling and there was no
 10071 * training in progress.
10072 */
10073 if (last_local_state == 0 && last_remote_state == 0)
10074 return;
10075
10076 decode_state_complete(ppd, last_local_state, "transmitted");
10077 decode_state_complete(ppd, last_remote_state, "received");
10078}
10079
Mike Marciniszyn77241052015-07-30 15:17:43 -040010080/*
10081 * Helper for set_link_state(). Do not call except from that routine.
10082 * Expects ppd->hls_mutex to be held.
10083 *
10084 * @rem_reason value to be sent to the neighbor
10085 *
10086 * LinkDownReasons only set if transition succeeds.
10087 */
10088static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10089{
10090 struct hfi1_devdata *dd = ppd->dd;
10091 u32 pstate, previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010092 int ret;
10093 int do_transition;
10094 int do_wait;
10095
10096 previous_state = ppd->host_link_state;
10097 ppd->host_link_state = HLS_GOING_OFFLINE;
10098 pstate = read_physical_state(dd);
10099 if (pstate == PLS_OFFLINE) {
10100 do_transition = 0; /* in right state */
10101 do_wait = 0; /* ...no need to wait */
10102 } else if ((pstate & 0xff) == PLS_OFFLINE) {
10103 do_transition = 0; /* in an offline transient state */
10104 do_wait = 1; /* ...wait for it to settle */
10105 } else {
10106 do_transition = 1; /* need to move to offline */
10107 do_wait = 1; /* ...will need to wait */
10108 }
10109
10110 if (do_transition) {
10111 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -080010112 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010113
10114 if (ret != HCMD_SUCCESS) {
10115 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010116 "Failed to transition to Offline link state, return %d\n",
10117 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010118 return -EINVAL;
10119 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010120 if (ppd->offline_disabled_reason ==
10121 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010122 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -080010123 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010124 }
10125
10126 if (do_wait) {
10127 /* it can take a while for the link to go down */
Dean Luickdc060242015-10-26 10:28:29 -040010128 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010129 if (ret < 0)
10130 return ret;
10131 }
10132
10133 /* make sure the logical state is also down */
10134 wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10135
10136 /*
10137 * Now in charge of LCB - must be after the physical state is
10138 * offline.quiet and before host_link_state is changed.
10139 */
10140 set_host_lcb_access(dd);
10141 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10142 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10143
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010144 if (ppd->port_type == PORT_TYPE_QSFP &&
10145 ppd->qsfp_info.limiting_active &&
10146 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010147 int ret;
10148
10149 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10150 if (ret == 0) {
10151 set_qsfp_tx(ppd, 0);
10152 release_chip_resource(dd, qsfp_resource(dd));
10153 } else {
10154 /* not fatal, but should warn */
10155 dd_dev_err(dd,
10156 "Unable to acquire lock to turn off QSFP TX\n");
10157 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010158 }
10159
Mike Marciniszyn77241052015-07-30 15:17:43 -040010160 /*
10161 * The LNI has a mandatory wait time after the physical state
10162 * moves to Offline.Quiet. The wait time may be different
10163 * depending on how the link went down. The 8051 firmware
10164 * will observe the needed wait time and only move to ready
10165 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010166 * is 6s, so wait that long and then at least 0.5s more for
10167 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010168 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010169 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010170 if (ret) {
10171 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010172 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010173 /* state is really offline, so make it so */
10174 ppd->host_link_state = HLS_DN_OFFLINE;
10175 return ret;
10176 }
10177
10178 /*
10179 * The state is now offline and the 8051 is ready to accept host
10180 * requests.
10181 * - change our state
10182 * - notify others if we were previously in a linkup state
10183 */
10184 ppd->host_link_state = HLS_DN_OFFLINE;
10185 if (previous_state & HLS_UP) {
10186 /* went down while link was up */
10187 handle_linkup_change(dd, 0);
10188 } else if (previous_state
10189 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10190 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010191 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010192 }
10193
10194 /* the active link width (downgrade) is 0 on link down */
10195 ppd->link_width_active = 0;
10196 ppd->link_width_downgrade_tx_active = 0;
10197 ppd->link_width_downgrade_rx_active = 0;
10198 ppd->current_egress_rate = 0;
10199 return 0;
10200}
10201
10202/* return the link state name */
10203static const char *link_state_name(u32 state)
10204{
10205 const char *name;
10206 int n = ilog2(state);
10207 static const char * const names[] = {
10208 [__HLS_UP_INIT_BP] = "INIT",
10209 [__HLS_UP_ARMED_BP] = "ARMED",
10210 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10211 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10212 [__HLS_DN_POLL_BP] = "POLL",
10213 [__HLS_DN_DISABLE_BP] = "DISABLE",
10214 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10215 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10216 [__HLS_GOING_UP_BP] = "GOING_UP",
10217 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10218 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10219 };
10220
10221 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10222 return name ? name : "unknown";
10223}
10224
10225/* return the link state reason name */
10226static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10227{
10228 if (state == HLS_UP_INIT) {
10229 switch (ppd->linkinit_reason) {
10230 case OPA_LINKINIT_REASON_LINKUP:
10231 return "(LINKUP)";
10232 case OPA_LINKINIT_REASON_FLAPPING:
10233 return "(FLAPPING)";
10234 case OPA_LINKINIT_OUTSIDE_POLICY:
10235 return "(OUTSIDE_POLICY)";
10236 case OPA_LINKINIT_QUARANTINED:
10237 return "(QUARANTINED)";
10238 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10239 return "(INSUFIC_CAPABILITY)";
10240 default:
10241 break;
10242 }
10243 }
10244 return "";
10245}
10246
10247/*
10248 * driver_physical_state - convert the driver's notion of a port's
10249 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10250 * Return -1 (converted to a u32) to indicate error.
10251 */
10252u32 driver_physical_state(struct hfi1_pportdata *ppd)
10253{
10254 switch (ppd->host_link_state) {
10255 case HLS_UP_INIT:
10256 case HLS_UP_ARMED:
10257 case HLS_UP_ACTIVE:
10258 return IB_PORTPHYSSTATE_LINKUP;
10259 case HLS_DN_POLL:
10260 return IB_PORTPHYSSTATE_POLLING;
10261 case HLS_DN_DISABLE:
10262 return IB_PORTPHYSSTATE_DISABLED;
10263 case HLS_DN_OFFLINE:
10264 return OPA_PORTPHYSSTATE_OFFLINE;
10265 case HLS_VERIFY_CAP:
10266 return IB_PORTPHYSSTATE_POLLING;
10267 case HLS_GOING_UP:
10268 return IB_PORTPHYSSTATE_POLLING;
10269 case HLS_GOING_OFFLINE:
10270 return OPA_PORTPHYSSTATE_OFFLINE;
10271 case HLS_LINK_COOLDOWN:
10272 return OPA_PORTPHYSSTATE_OFFLINE;
10273 case HLS_DN_DOWNDEF:
10274 default:
10275 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10276 ppd->host_link_state);
10277 return -1;
10278 }
10279}
10280
10281/*
10282 * driver_logical_state - convert the driver's notion of a port's
10283 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10284 * (converted to a u32) to indicate error.
10285 */
10286u32 driver_logical_state(struct hfi1_pportdata *ppd)
10287{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010288 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010289 return IB_PORT_DOWN;
10290
10291 switch (ppd->host_link_state & HLS_UP) {
10292 case HLS_UP_INIT:
10293 return IB_PORT_INIT;
10294 case HLS_UP_ARMED:
10295 return IB_PORT_ARMED;
10296 case HLS_UP_ACTIVE:
10297 return IB_PORT_ACTIVE;
10298 default:
10299 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10300 ppd->host_link_state);
10301 return -1;
10302 }
10303}
10304
10305void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10306 u8 neigh_reason, u8 rem_reason)
10307{
10308 if (ppd->local_link_down_reason.latest == 0 &&
10309 ppd->neigh_link_down_reason.latest == 0) {
10310 ppd->local_link_down_reason.latest = lcl_reason;
10311 ppd->neigh_link_down_reason.latest = neigh_reason;
10312 ppd->remote_link_down_reason = rem_reason;
10313 }
10314}
10315
10316/*
10317 * Change the physical and/or logical link state.
10318 *
10319 * Do not call this routine while inside an interrupt. It contains
10320 * calls to routines that can take multiple seconds to finish.
10321 *
10322 * Returns 0 on success, -errno on failure.
10323 */
10324int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10325{
10326 struct hfi1_devdata *dd = ppd->dd;
10327 struct ib_event event = {.device = NULL};
10328 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010329 int orig_new_state, poll_bounce;
10330
10331 mutex_lock(&ppd->hls_lock);
10332
10333 orig_new_state = state;
10334 if (state == HLS_DN_DOWNDEF)
10335 state = dd->link_default;
10336
10337 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010338 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10339 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010340
10341 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010342 link_state_name(ppd->host_link_state),
10343 link_state_name(orig_new_state),
10344 poll_bounce ? "(bounce) " : "",
10345 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010346
Mike Marciniszyn77241052015-07-30 15:17:43 -040010347 /*
10348 * If we're going to a (HLS_*) link state that implies the logical
10349 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10350 * reset is_sm_config_started to 0.
10351 */
10352 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10353 ppd->is_sm_config_started = 0;
10354
10355 /*
10356 * Do nothing if the states match. Let a poll to poll link bounce
10357 * go through.
10358 */
10359 if (ppd->host_link_state == state && !poll_bounce)
10360 goto done;
10361
10362 switch (state) {
10363 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010364 if (ppd->host_link_state == HLS_DN_POLL &&
10365 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010366 /*
10367 * Quick link up jumps from polling to here.
10368 *
10369 * Whether in normal or loopback mode, the
10370 * simulator jumps from polling to link up.
10371 * Accept that here.
10372 */
Jubin John17fb4f22016-02-14 20:21:52 -080010373 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010374 } else if (ppd->host_link_state != HLS_GOING_UP) {
10375 goto unexpected;
10376 }
10377
10378 ppd->host_link_state = HLS_UP_INIT;
10379 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10380 if (ret) {
10381 /* logical state didn't change, stay at going_up */
10382 ppd->host_link_state = HLS_GOING_UP;
10383 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010384 "%s: logical state did not change to INIT\n",
10385 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010386 } else {
10387 /* clear old transient LINKINIT_REASON code */
10388 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10389 ppd->linkinit_reason =
10390 OPA_LINKINIT_REASON_LINKUP;
10391
10392 /* enable the port */
10393 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10394
10395 handle_linkup_change(dd, 1);
10396 }
10397 break;
10398 case HLS_UP_ARMED:
10399 if (ppd->host_link_state != HLS_UP_INIT)
10400 goto unexpected;
10401
10402 ppd->host_link_state = HLS_UP_ARMED;
10403 set_logical_state(dd, LSTATE_ARMED);
10404 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10405 if (ret) {
10406 /* logical state didn't change, stay at init */
10407 ppd->host_link_state = HLS_UP_INIT;
10408 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010409 "%s: logical state did not change to ARMED\n",
10410 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010411 }
10412 /*
10413 * The simulator does not currently implement SMA messages,
10414 * so neighbor_normal is not set. Set it here when we first
10415 * move to Armed.
10416 */
10417 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10418 ppd->neighbor_normal = 1;
10419 break;
10420 case HLS_UP_ACTIVE:
10421 if (ppd->host_link_state != HLS_UP_ARMED)
10422 goto unexpected;
10423
10424 ppd->host_link_state = HLS_UP_ACTIVE;
10425 set_logical_state(dd, LSTATE_ACTIVE);
10426 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10427 if (ret) {
10428 /* logical state didn't change, stay at armed */
10429 ppd->host_link_state = HLS_UP_ARMED;
10430 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010431 "%s: logical state did not change to ACTIVE\n",
10432 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010433 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010434 /* tell all engines to go running */
10435 sdma_all_running(dd);
10436
 10437			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010438 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010439 event.element.port_num = ppd->port;
10440 event.event = IB_EVENT_PORT_ACTIVE;
10441 }
10442 break;
10443 case HLS_DN_POLL:
10444 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10445 ppd->host_link_state == HLS_DN_OFFLINE) &&
10446 dd->dc_shutdown)
10447 dc_start(dd);
10448 /* Hand LED control to the DC */
10449 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10450
10451 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10452 u8 tmp = ppd->link_enabled;
10453
10454 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10455 if (ret) {
10456 ppd->link_enabled = tmp;
10457 break;
10458 }
10459 ppd->remote_link_down_reason = 0;
10460
10461 if (ppd->driver_link_ready)
10462 ppd->link_enabled = 1;
10463 }
10464
Jim Snowfb9036d2016-01-11 18:32:21 -050010465 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010466 ret = set_local_link_attributes(ppd);
10467 if (ret)
10468 break;
10469
10470 ppd->port_error_action = 0;
10471 ppd->host_link_state = HLS_DN_POLL;
10472
10473 if (quick_linkup) {
10474 /* quick linkup does not go into polling */
10475 ret = do_quick_linkup(dd);
10476 } else {
10477 ret1 = set_physical_link_state(dd, PLS_POLLING);
10478 if (ret1 != HCMD_SUCCESS) {
10479 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010480 "Failed to transition to Polling link state, return 0x%x\n",
10481 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010482 ret = -EINVAL;
10483 }
10484 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010485 ppd->offline_disabled_reason =
10486 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010487 /*
10488 * If an error occurred above, go back to offline. The
10489 * caller may reschedule another attempt.
10490 */
10491 if (ret)
10492 goto_offline(ppd, 0);
10493 break;
10494 case HLS_DN_DISABLE:
10495 /* link is disabled */
10496 ppd->link_enabled = 0;
10497
10498 /* allow any state to transition to disabled */
10499
10500 /* must transition to offline first */
10501 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10502 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10503 if (ret)
10504 break;
10505 ppd->remote_link_down_reason = 0;
10506 }
10507
10508 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10509 if (ret1 != HCMD_SUCCESS) {
10510 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010511 "Failed to transition to Disabled link state, return 0x%x\n",
10512 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010513 ret = -EINVAL;
10514 break;
10515 }
10516 ppd->host_link_state = HLS_DN_DISABLE;
10517 dc_shutdown(dd);
10518 break;
10519 case HLS_DN_OFFLINE:
10520 if (ppd->host_link_state == HLS_DN_DISABLE)
10521 dc_start(dd);
10522
10523 /* allow any state to transition to offline */
10524 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10525 if (!ret)
10526 ppd->remote_link_down_reason = 0;
10527 break;
10528 case HLS_VERIFY_CAP:
10529 if (ppd->host_link_state != HLS_DN_POLL)
10530 goto unexpected;
10531 ppd->host_link_state = HLS_VERIFY_CAP;
10532 break;
10533 case HLS_GOING_UP:
10534 if (ppd->host_link_state != HLS_VERIFY_CAP)
10535 goto unexpected;
10536
10537 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10538 if (ret1 != HCMD_SUCCESS) {
10539 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010540 "Failed to transition to link up state, return 0x%x\n",
10541 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010542 ret = -EINVAL;
10543 break;
10544 }
10545 ppd->host_link_state = HLS_GOING_UP;
10546 break;
10547
10548 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10549 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10550 default:
10551 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010552 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010553 ret = -EINVAL;
10554 break;
10555 }
10556
Mike Marciniszyn77241052015-07-30 15:17:43 -040010557 goto done;
10558
10559unexpected:
10560 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010561 __func__, link_state_name(ppd->host_link_state),
10562 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010563 ret = -EINVAL;
10564
10565done:
10566 mutex_unlock(&ppd->hls_lock);
10567
10568 if (event.device)
10569 ib_dispatch_event(&event);
10570
10571 return ret;
10572}
10573
10574int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10575{
10576 u64 reg;
10577 int ret = 0;
10578
10579 switch (which) {
10580 case HFI1_IB_CFG_LIDLMC:
10581 set_lidlmc(ppd);
10582 break;
10583 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10584 /*
10585 * The VL Arbitrator high limit is sent in units of 4k
10586 * bytes, while HFI stores it in units of 64 bytes.
10587 */
Jubin John8638b772016-02-14 20:19:24 -080010588 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010589 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10590 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10591 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10592 break;
10593 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10594 /* HFI only supports POLL as the default link down state */
10595 if (val != HLS_DN_POLL)
10596 ret = -EINVAL;
10597 break;
10598 case HFI1_IB_CFG_OP_VLS:
10599 if (ppd->vls_operational != val) {
10600 ppd->vls_operational = val;
10601 if (!ppd->port)
10602 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010603 }
10604 break;
10605 /*
10606 * For link width, link width downgrade, and speed enable, always AND
10607 * the setting with what is actually supported. This has two benefits.
10608 * First, enabled can't have unsupported values, no matter what the
10609 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10610 * "fill in with your supported value" have all the bits in the
10611 * field set, so simply ANDing with supported has the desired result.
10612 */
10613 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10614 ppd->link_width_enabled = val & ppd->link_width_supported;
10615 break;
10616 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10617 ppd->link_width_downgrade_enabled =
10618 val & ppd->link_width_downgrade_supported;
10619 break;
10620 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10621 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10622 break;
10623 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10624 /*
10625 * HFI does not follow IB specs, save this value
10626 * so we can report it, if asked.
10627 */
10628 ppd->overrun_threshold = val;
10629 break;
10630 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10631 /*
10632 * HFI does not follow IB specs, save this value
10633 * so we can report it, if asked.
10634 */
10635 ppd->phy_error_threshold = val;
10636 break;
10637
10638 case HFI1_IB_CFG_MTU:
10639 set_send_length(ppd);
10640 break;
10641
10642 case HFI1_IB_CFG_PKEYS:
10643 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10644 set_partition_keys(ppd);
10645 break;
10646
10647 default:
10648 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10649 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010650 "%s: which %s, val 0x%x: not implemented\n",
10651 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010652 break;
10653 }
10654 return ret;
10655}
10656
10657/* begin functions related to vl arbitration table caching */
10658static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10659{
10660 int i;
10661
10662 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10663 VL_ARB_LOW_PRIO_TABLE_SIZE);
10664 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10665 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10666
10667 /*
10668 * Note that we always return values directly from the
10669 * 'vl_arb_cache' (and do no CSR reads) in response to a
10670 * 'Get(VLArbTable)'. This is obviously correct after a
10671 * 'Set(VLArbTable)', since the cache will then be up to
10672 * date. But it's also correct prior to any 'Set(VLArbTable)'
10673	 * since then both the cache and the relevant h/w registers
10674 * will be zeroed.
10675 */
10676
10677 for (i = 0; i < MAX_PRIO_TABLE; i++)
10678 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10679}
10680
10681/*
10682 * vl_arb_lock_cache
10683 *
10684 * All other vl_arb_* functions should be called only after locking
10685 * the cache.
10686 */
10687static inline struct vl_arb_cache *
10688vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10689{
10690 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10691 return NULL;
10692 spin_lock(&ppd->vl_arb_cache[idx].lock);
10693 return &ppd->vl_arb_cache[idx];
10694}
10695
10696static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10697{
10698 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10699}
10700
10701static void vl_arb_get_cache(struct vl_arb_cache *cache,
10702 struct ib_vl_weight_elem *vl)
10703{
10704 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10705}
10706
10707static void vl_arb_set_cache(struct vl_arb_cache *cache,
10708 struct ib_vl_weight_elem *vl)
10709{
10710 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10711}
10712
10713static int vl_arb_match_cache(struct vl_arb_cache *cache,
10714 struct ib_vl_weight_elem *vl)
10715{
10716 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10717}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010718
Mike Marciniszyn77241052015-07-30 15:17:43 -040010719/* end functions related to vl arbitration table caching */
10720
10721static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10722 u32 size, struct ib_vl_weight_elem *vl)
10723{
10724 struct hfi1_devdata *dd = ppd->dd;
10725 u64 reg;
10726 unsigned int i, is_up = 0;
10727 int drain, ret = 0;
10728
10729 mutex_lock(&ppd->hls_lock);
10730
10731 if (ppd->host_link_state & HLS_UP)
10732 is_up = 1;
10733
10734 drain = !is_ax(dd) && is_up;
10735
10736 if (drain)
10737 /*
10738 * Before adjusting VL arbitration weights, empty per-VL
10739 * FIFOs, otherwise a packet whose VL weight is being
10740 * set to 0 could get stuck in a FIFO with no chance to
10741 * egress.
10742 */
10743 ret = stop_drain_data_vls(dd);
10744
10745 if (ret) {
10746 dd_dev_err(
10747 dd,
10748 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10749 __func__);
10750 goto err;
10751 }
10752
10753 for (i = 0; i < size; i++, vl++) {
10754 /*
10755 * NOTE: The low priority shift and mask are used here, but
10756 * they are the same for both the low and high registers.
10757 */
10758 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10759 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10760 | (((u64)vl->weight
10761 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10762 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10763 write_csr(dd, target + (i * 8), reg);
10764 }
10765 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10766
10767 if (drain)
10768 open_fill_data_vls(dd); /* reopen all VLs */
10769
10770err:
10771 mutex_unlock(&ppd->hls_lock);
10772
10773 return ret;
10774}
10775
10776/*
10777 * Read one credit merge VL register.
10778 */
10779static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10780 struct vl_limit *vll)
10781{
10782 u64 reg = read_csr(dd, csr);
10783
10784 vll->dedicated = cpu_to_be16(
10785 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10786 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10787 vll->shared = cpu_to_be16(
10788 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10789 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10790}
10791
10792/*
10793 * Read the current credit merge limits.
10794 */
10795static int get_buffer_control(struct hfi1_devdata *dd,
10796 struct buffer_control *bc, u16 *overall_limit)
10797{
10798 u64 reg;
10799 int i;
10800
10801 /* not all entries are filled in */
10802 memset(bc, 0, sizeof(*bc));
10803
10804 /* OPA and HFI have a 1-1 mapping */
10805 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010806 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010807
10808 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10809 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10810
10811 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10812 bc->overall_shared_limit = cpu_to_be16(
10813 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10814 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10815 if (overall_limit)
10816 *overall_limit = (reg
10817 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10818 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10819 return sizeof(struct buffer_control);
10820}
10821
10822static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10823{
10824 u64 reg;
10825 int i;
10826
10827 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10828 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10829 for (i = 0; i < sizeof(u64); i++) {
10830 u8 byte = *(((u8 *)&reg) + i);
10831
10832 dp->vlnt[2 * i] = byte & 0xf;
10833 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10834 }
10835
10836 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10837 for (i = 0; i < sizeof(u64); i++) {
10838 u8 byte = *(((u8 *)&reg) + i);
10839
10840 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10841 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10842 }
10843 return sizeof(struct sc2vlnt);
10844}
10845
10846static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10847 struct ib_vl_weight_elem *vl)
10848{
10849 unsigned int i;
10850
10851 for (i = 0; i < nelems; i++, vl++) {
10852 vl->vl = 0xf;
10853 vl->weight = 0;
10854 }
10855}
10856
10857static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10858{
10859 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080010860 DC_SC_VL_VAL(15_0,
10861 0, dp->vlnt[0] & 0xf,
10862 1, dp->vlnt[1] & 0xf,
10863 2, dp->vlnt[2] & 0xf,
10864 3, dp->vlnt[3] & 0xf,
10865 4, dp->vlnt[4] & 0xf,
10866 5, dp->vlnt[5] & 0xf,
10867 6, dp->vlnt[6] & 0xf,
10868 7, dp->vlnt[7] & 0xf,
10869 8, dp->vlnt[8] & 0xf,
10870 9, dp->vlnt[9] & 0xf,
10871 10, dp->vlnt[10] & 0xf,
10872 11, dp->vlnt[11] & 0xf,
10873 12, dp->vlnt[12] & 0xf,
10874 13, dp->vlnt[13] & 0xf,
10875 14, dp->vlnt[14] & 0xf,
10876 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010877 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080010878 DC_SC_VL_VAL(31_16,
10879 16, dp->vlnt[16] & 0xf,
10880 17, dp->vlnt[17] & 0xf,
10881 18, dp->vlnt[18] & 0xf,
10882 19, dp->vlnt[19] & 0xf,
10883 20, dp->vlnt[20] & 0xf,
10884 21, dp->vlnt[21] & 0xf,
10885 22, dp->vlnt[22] & 0xf,
10886 23, dp->vlnt[23] & 0xf,
10887 24, dp->vlnt[24] & 0xf,
10888 25, dp->vlnt[25] & 0xf,
10889 26, dp->vlnt[26] & 0xf,
10890 27, dp->vlnt[27] & 0xf,
10891 28, dp->vlnt[28] & 0xf,
10892 29, dp->vlnt[29] & 0xf,
10893 30, dp->vlnt[30] & 0xf,
10894 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010895}
10896
10897static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10898 u16 limit)
10899{
10900 if (limit != 0)
10901 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010902 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010903}
10904
10905/* change only the shared limit portion of SendCmGlobalCredit */
10906static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10907{
10908 u64 reg;
10909
10910 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10911 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10912 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10913 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10914}
10915
10916/* change only the total credit limit portion of SendCmGlobalCredit */
10917static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10918{
10919 u64 reg;
10920
10921 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10922 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10923 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10924 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10925}
10926
10927/* set the given per-VL shared limit */
10928static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10929{
10930 u64 reg;
10931 u32 addr;
10932
10933 if (vl < TXE_NUM_DATA_VL)
10934 addr = SEND_CM_CREDIT_VL + (8 * vl);
10935 else
10936 addr = SEND_CM_CREDIT_VL15;
10937
10938 reg = read_csr(dd, addr);
10939 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10940 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10941 write_csr(dd, addr, reg);
10942}
10943
10944/* set the given per-VL dedicated limit */
10945static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10946{
10947 u64 reg;
10948 u32 addr;
10949
10950 if (vl < TXE_NUM_DATA_VL)
10951 addr = SEND_CM_CREDIT_VL + (8 * vl);
10952 else
10953 addr = SEND_CM_CREDIT_VL15;
10954
10955 reg = read_csr(dd, addr);
10956 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10957 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10958 write_csr(dd, addr, reg);
10959}
10960
10961/* spin until the given per-VL status mask bits clear */
10962static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10963 const char *which)
10964{
10965 unsigned long timeout;
10966 u64 reg;
10967
10968 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10969 while (1) {
10970 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10971
10972 if (reg == 0)
10973 return; /* success */
10974 if (time_after(jiffies, timeout))
10975 break; /* timed out */
10976 udelay(1);
10977 }
10978
10979 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010980 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10981 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010982 /*
10983 * If this occurs, it is likely there was a credit loss on the link.
10984 * The only recovery from that is a link bounce.
10985 */
10986 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010987 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010988}
10989
10990/*
10991 * The number of credits on the VLs may be changed while everything
10992 * is "live", but the following algorithm must be followed due to
10993 * how the hardware is actually implemented. In particular,
10994 * Return_Credit_Status[] is the only correct status check.
10995 *
10996 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10997 * set Global_Shared_Credit_Limit = 0
10998 * use_all_vl = 1
10999 * mask0 = all VLs that are changing either dedicated or shared limits
11000 * set Shared_Limit[mask0] = 0
11001 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11002 * if (changing any dedicated limit)
11003 * mask1 = all VLs that are lowering dedicated limits
11004 * lower Dedicated_Limit[mask1]
11005 * spin until Return_Credit_Status[mask1] == 0
11006 * raise Dedicated_Limits
11007 * raise Shared_Limits
11008 * raise Global_Shared_Credit_Limit
11009 *
11010 * lower = if the new limit is lower, set the limit to the new value
11011 * raise = if the new limit is higher than the current value (may be changed
11012 * earlier in the algorithm), set the new limit to the new value
11013 */
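/*
 * Illustration of the algorithm (assumed example values): raising only VL0's
 * dedicated limit from 64 to 128 credits first raises the global total,
 * zeroes VL0's shared limit, waits for VL0's Return_Credit_Status to clear,
 * then writes the larger dedicated value and restores the shared limits.
 */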
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011014int set_buffer_control(struct hfi1_pportdata *ppd,
11015 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011016{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011017 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011018 u64 changing_mask, ld_mask, stat_mask;
11019 int change_count;
11020 int i, use_all_mask;
11021 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011022 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011023 /*
11024	 * A0: the variable any_shared_limit_changing below (and its use in the
11025	 * algorithm above) exists only for A0 support and can be removed with it.
11026 */
11027 int any_shared_limit_changing;
11028 struct buffer_control cur_bc;
11029 u8 changing[OPA_MAX_VLS];
11030 u8 lowering_dedicated[OPA_MAX_VLS];
11031 u16 cur_total;
11032 u32 new_total = 0;
11033 const u64 all_mask =
11034 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11035 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11036 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11037 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11038 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11039 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11040 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11041 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11042 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11043
11044#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11045#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11046
Mike Marciniszyn77241052015-07-30 15:17:43 -040011047 /* find the new total credits, do sanity check on unused VLs */
11048 for (i = 0; i < OPA_MAX_VLS; i++) {
11049 if (valid_vl(i)) {
11050 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11051 continue;
11052 }
11053 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011054 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011055 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011056 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011057 new_bc->vl[i].dedicated = 0;
11058 new_bc->vl[i].shared = 0;
11059 }
11060 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011061
Mike Marciniszyn77241052015-07-30 15:17:43 -040011062 /* fetch the current values */
11063 get_buffer_control(dd, &cur_bc, &cur_total);
11064
11065 /*
11066 * Create the masks we will use.
11067 */
11068 memset(changing, 0, sizeof(changing));
11069 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011070 /*
11071 * NOTE: Assumes that the individual VL bits are adjacent and in
11072 * increasing order
11073 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011074 stat_mask =
11075 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11076 changing_mask = 0;
11077 ld_mask = 0;
11078 change_count = 0;
11079 any_shared_limit_changing = 0;
11080 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11081 if (!valid_vl(i))
11082 continue;
11083 this_shared_changing = new_bc->vl[i].shared
11084 != cur_bc.vl[i].shared;
11085 if (this_shared_changing)
11086 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011087 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11088 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011089 changing[i] = 1;
11090 changing_mask |= stat_mask;
11091 change_count++;
11092 }
11093 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11094 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11095 lowering_dedicated[i] = 1;
11096 ld_mask |= stat_mask;
11097 }
11098 }
11099
11100 /* bracket the credit change with a total adjustment */
11101 if (new_total > cur_total)
11102 set_global_limit(dd, new_total);
11103
11104 /*
11105 * Start the credit change algorithm.
11106 */
11107 use_all_mask = 0;
11108 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011109 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11110 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011111 set_global_shared(dd, 0);
11112 cur_bc.overall_shared_limit = 0;
11113 use_all_mask = 1;
11114 }
11115
11116 for (i = 0; i < NUM_USABLE_VLS; i++) {
11117 if (!valid_vl(i))
11118 continue;
11119
11120 if (changing[i]) {
11121 set_vl_shared(dd, i, 0);
11122 cur_bc.vl[i].shared = 0;
11123 }
11124 }
11125
11126 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011127 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011128
11129 if (change_count > 0) {
11130 for (i = 0; i < NUM_USABLE_VLS; i++) {
11131 if (!valid_vl(i))
11132 continue;
11133
11134 if (lowering_dedicated[i]) {
11135 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011136 be16_to_cpu(new_bc->
11137 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011138 cur_bc.vl[i].dedicated =
11139 new_bc->vl[i].dedicated;
11140 }
11141 }
11142
11143 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11144
11145 /* now raise all dedicated that are going up */
11146 for (i = 0; i < NUM_USABLE_VLS; i++) {
11147 if (!valid_vl(i))
11148 continue;
11149
11150 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11151 be16_to_cpu(cur_bc.vl[i].dedicated))
11152 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011153 be16_to_cpu(new_bc->
11154 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011155 }
11156 }
11157
11158 /* next raise all shared that are going up */
11159 for (i = 0; i < NUM_USABLE_VLS; i++) {
11160 if (!valid_vl(i))
11161 continue;
11162
11163 if (be16_to_cpu(new_bc->vl[i].shared) >
11164 be16_to_cpu(cur_bc.vl[i].shared))
11165 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11166 }
11167
11168 /* finally raise the global shared */
11169 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011170 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011171 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011172 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011173
11174 /* bracket the credit change with a total adjustment */
11175 if (new_total < cur_total)
11176 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011177
11178 /*
11179	 * Determine the actual number of operational VLs using the number of
11180 * dedicated and shared credits for each VL.
11181 */
11182 if (change_count > 0) {
11183 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11184 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11185 be16_to_cpu(new_bc->vl[i].shared) > 0)
11186 vl_count++;
11187 ppd->actual_vls_operational = vl_count;
11188 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11189 ppd->actual_vls_operational :
11190 ppd->vls_operational,
11191 NULL);
11192 if (ret == 0)
11193 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11194 ppd->actual_vls_operational :
11195 ppd->vls_operational, NULL);
11196 if (ret)
11197 return ret;
11198 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011199 return 0;
11200}
11201
11202/*
11203 * Read the given fabric manager table. Return the size of the
11204 * table (in bytes) on success, and a negative error code on
11205 * failure.
11206 */
11207int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11208
11209{
11210 int size;
11211 struct vl_arb_cache *vlc;
11212
11213 switch (which) {
11214 case FM_TBL_VL_HIGH_ARB:
11215 size = 256;
11216 /*
11217 * OPA specifies 128 elements (of 2 bytes each), though
11218 * HFI supports only 16 elements in h/w.
11219 */
11220 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11221 vl_arb_get_cache(vlc, t);
11222 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11223 break;
11224 case FM_TBL_VL_LOW_ARB:
11225 size = 256;
11226 /*
11227 * OPA specifies 128 elements (of 2 bytes each), though
11228 * HFI supports only 16 elements in h/w.
11229 */
11230 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11231 vl_arb_get_cache(vlc, t);
11232 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11233 break;
11234 case FM_TBL_BUFFER_CONTROL:
11235 size = get_buffer_control(ppd->dd, t, NULL);
11236 break;
11237 case FM_TBL_SC2VLNT:
11238 size = get_sc2vlnt(ppd->dd, t);
11239 break;
11240 case FM_TBL_VL_PREEMPT_ELEMS:
11241 size = 256;
11242 /* OPA specifies 128 elements, of 2 bytes each */
11243 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11244 break;
11245 case FM_TBL_VL_PREEMPT_MATRIX:
11246 size = 256;
11247 /*
11248 * OPA specifies that this is the same size as the VL
11249 * arbitration tables (i.e., 256 bytes).
11250 */
11251 break;
11252 default:
11253 return -EINVAL;
11254 }
11255 return size;
11256}
11257
11258/*
11259 * Write the given fabric manager table.
11260 */
11261int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11262{
11263 int ret = 0;
11264 struct vl_arb_cache *vlc;
11265
11266 switch (which) {
11267 case FM_TBL_VL_HIGH_ARB:
11268 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11269 if (vl_arb_match_cache(vlc, t)) {
11270 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11271 break;
11272 }
11273 vl_arb_set_cache(vlc, t);
11274 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11275 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11276 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11277 break;
11278 case FM_TBL_VL_LOW_ARB:
11279 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11280 if (vl_arb_match_cache(vlc, t)) {
11281 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11282 break;
11283 }
11284 vl_arb_set_cache(vlc, t);
11285 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11286 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11287 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11288 break;
11289 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011290 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011291 break;
11292 case FM_TBL_SC2VLNT:
11293 set_sc2vlnt(ppd->dd, t);
11294 break;
11295 default:
11296 ret = -EINVAL;
11297 }
11298 return ret;
11299}
11300
11301/*
11302 * Disable all data VLs.
11303 *
11304 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11305 */
11306static int disable_data_vls(struct hfi1_devdata *dd)
11307{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011308 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011309 return 1;
11310
11311 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11312
11313 return 0;
11314}
11315
11316/*
11317 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11318 * Just re-enables all data VLs (the "fill" part happens
11319 * automatically - the name was chosen for symmetry with
11320 * stop_drain_data_vls()).
11321 *
11322 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11323 */
11324int open_fill_data_vls(struct hfi1_devdata *dd)
11325{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011326 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011327 return 1;
11328
11329 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11330
11331 return 0;
11332}
11333
11334/*
11335 * drain_data_vls() - assumes that disable_data_vls() has been called;
11336 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
11337 * SDMA engines to drop to 0.
11338 */
11339static void drain_data_vls(struct hfi1_devdata *dd)
11340{
11341 sc_wait(dd);
11342 sdma_wait(dd);
11343 pause_for_credit_return(dd);
11344}
11345
11346/*
11347 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11348 *
11349 * Use open_fill_data_vls() to resume using data VLs. This pair is
11350 * meant to be used like this:
11351 *
11352 * stop_drain_data_vls(dd);
11353 * // do things with per-VL resources
11354 * open_fill_data_vls(dd);
11355 */
11356int stop_drain_data_vls(struct hfi1_devdata *dd)
11357{
11358 int ret;
11359
11360 ret = disable_data_vls(dd);
11361 if (ret == 0)
11362 drain_data_vls(dd);
11363
11364 return ret;
11365}
11366
11367/*
11368 * Convert a nanosecond time to a cclock count. No matter how slow
11369 * the cclock, a non-zero ns will always have a non-zero result.
11370 */
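/*
 * Example (assuming a cclock period longer than 1000 ps): ns = 1 would
 * compute to 0 cclocks and is bumped up to 1 by the check below.
 */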
11371u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11372{
11373 u32 cclocks;
11374
11375 if (dd->icode == ICODE_FPGA_EMULATION)
11376 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11377 else /* simulation pretends to be ASIC */
11378 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11379 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11380 cclocks = 1;
11381 return cclocks;
11382}
11383
11384/*
11385 * Convert a cclock count to nanoseconds. No matter how slow
11386 * the cclock, a non-zero cclocks value will always have a non-zero result.
11387 */
11388u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11389{
11390 u32 ns;
11391
11392 if (dd->icode == ICODE_FPGA_EMULATION)
11393 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11394 else /* simulation pretends to be ASIC */
11395 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11396 if (cclocks && !ns)
11397 ns = 1;
11398 return ns;
11399}
11400
11401/*
11402 * Dynamically adjust the receive interrupt timeout for a context based on
11403 * incoming packet rate.
11404 *
11405 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11406 */
11407static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11408{
11409 struct hfi1_devdata *dd = rcd->dd;
11410 u32 timeout = rcd->rcvavail_timeout;
11411
11412 /*
11413 * This algorithm doubles or halves the timeout depending on whether
11414	 * the number of packets received in this interrupt was less than,
11415	 * or greater than or equal to, the interrupt count.
11416 *
11417 * The calculations below do not allow a steady state to be achieved.
11418	 * Only at the endpoints is it possible to have an unchanging
11419	 * timeout.
11420 */
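	/*
	 * Example (assuming rcv_intr_count == 16): receiving 3 packets in
	 * this interrupt halves the timeout; receiving 40 doubles it, capped
	 * at rcv_intr_timeout_csr.
	 */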
11421 if (npkts < rcv_intr_count) {
11422 /*
11423 * Not enough packets arrived before the timeout, adjust
11424 * timeout downward.
11425 */
11426 if (timeout < 2) /* already at minimum? */
11427 return;
11428 timeout >>= 1;
11429 } else {
11430 /*
11431 * More than enough packets arrived before the timeout, adjust
11432 * timeout upward.
11433 */
11434 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11435 return;
11436 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11437 }
11438
11439 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011440 /*
11441 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11442 * been verified to be in range
11443 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011444 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011445 (u64)timeout <<
11446 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011447}
11448
11449void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11450 u32 intr_adjust, u32 npkts)
11451{
11452 struct hfi1_devdata *dd = rcd->dd;
11453 u64 reg;
11454 u32 ctxt = rcd->ctxt;
11455
11456 /*
11457 * Need to write timeout register before updating RcvHdrHead to ensure
11458 * that a new value is used when the HW decides to restart counting.
11459 */
11460 if (intr_adjust)
11461 adjust_rcv_timeout(rcd, npkts);
11462 if (updegr) {
11463 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11464 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11465 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11466 }
11467 mmiowb();
11468 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11469 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11470 << RCV_HDR_HEAD_HEAD_SHIFT);
11471 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11472 mmiowb();
11473}
11474
11475u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11476{
11477 u32 head, tail;
11478
11479 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11480 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11481
11482 if (rcd->rcvhdrtail_kvaddr)
11483 tail = get_rcvhdrtail(rcd);
11484 else
11485 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11486
11487 return head == tail;
11488}
11489
11490/*
11491 * Context Control and Receive Array encoding for buffer size:
11492 * 0x0 invalid
11493 * 0x1 4 KB
11494 * 0x2 8 KB
11495 * 0x3 16 KB
11496 * 0x4 32 KB
11497 * 0x5 64 KB
11498 * 0x6 128 KB
11499 * 0x7 256 KB
11500 * 0x8 512 KB (Receive Array only)
11501 * 0x9 1 MB (Receive Array only)
11502 * 0xa 2 MB (Receive Array only)
11503 *
11504 * 0xB-0xF - reserved (Receive Array only)
11505 *
11506 *
11507 * This routine assumes that the value has already been sanity checked.
11508 */
11509static u32 encoded_size(u32 size)
11510{
11511 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011512 case 4 * 1024: return 0x1;
11513 case 8 * 1024: return 0x2;
11514 case 16 * 1024: return 0x3;
11515 case 32 * 1024: return 0x4;
11516 case 64 * 1024: return 0x5;
11517 case 128 * 1024: return 0x6;
11518 case 256 * 1024: return 0x7;
11519 case 512 * 1024: return 0x8;
11520 case 1 * 1024 * 1024: return 0x9;
11521 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011522 }
11523 return 0x1; /* if invalid, go with the minimum size */
11524}
11525
11526void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11527{
11528 struct hfi1_ctxtdata *rcd;
11529 u64 rcvctrl, reg;
11530 int did_enable = 0;
11531
11532 rcd = dd->rcd[ctxt];
11533 if (!rcd)
11534 return;
11535
11536 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11537
11538 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11539	/* if the context is already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011540 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11541 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011542 /* reset the tail and hdr addresses, and sequence count */
11543 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011544 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011545 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11546 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011547 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011548 rcd->seq_cnt = 1;
11549
11550 /* reset the cached receive header queue head value */
11551 rcd->head = 0;
11552
11553 /*
11554 * Zero the receive header queue so we don't get false
11555 * positives when checking the sequence number. The
11556 * sequence numbers could land exactly on the same spot.
11557		 * E.g. an rcd restart before the receive header queue wrapped.
11558 */
11559 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11560
11561 /* starting timeout */
11562 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11563
11564 /* enable the context */
11565 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11566
11567 /* clean the egr buffer size first */
11568 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11569 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11570 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11571 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11572
11573 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11574 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11575 did_enable = 1;
11576
11577 /* zero RcvEgrIndexHead */
11578 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11579
11580 /* set eager count and base index */
11581 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11582 & RCV_EGR_CTRL_EGR_CNT_MASK)
11583 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11584 (((rcd->eager_base >> RCV_SHIFT)
11585 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11586 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11587 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11588
11589 /*
11590 * Set TID (expected) count and base index.
11591 * rcd->expected_count is set to individual RcvArray entries,
11592 * not pairs, and the CSR takes a pair-count in groups of
11593 * four, so divide by 8.
11594 */
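		/*
		 * e.g., per the divide-by-8 above, 2048 expected RcvArray
		 * entries program a pair-count field of 256.
		 */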
11595 reg = (((rcd->expected_count >> RCV_SHIFT)
11596 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11597 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11598 (((rcd->expected_base >> RCV_SHIFT)
11599 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11600 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11601 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011602 if (ctxt == HFI1_CTRL_CTXT)
11603 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011604 }
11605 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11606 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011607 /*
11608		 * When the receive context is being disabled, turn on tail
11609		 * update with a dummy tail address and then disable the
11610		 * receive context.
11611 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011612 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011613 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011614 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011615 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011616 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11617 }
11618
Mike Marciniszyn77241052015-07-30 15:17:43 -040011619 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11620 }
11621 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11622 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11623 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11624 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011625 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011626 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011627 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11628 /* See comment on RcvCtxtCtrl.TailUpd above */
11629 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11630 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11631 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011632 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11633 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11634 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11635 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11636 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011637 /*
11638 * In one-packet-per-eager mode, the size comes from
11639 * the RcvArray entry.
11640 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011641 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11642 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11643 }
11644 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11645 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11646 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11647 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11648 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11649 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11650 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11651 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11652 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11653 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11654 rcd->rcvctrl = rcvctrl;
11655 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11656 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11657
11658 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011659 if (did_enable &&
11660 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011661 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11662 if (reg != 0) {
11663 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011664 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011665 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11666 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11667 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11668 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11669 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11670 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011671 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011672 }
11673 }
11674
11675 if (did_enable) {
11676 /*
11677 * The interrupt timeout and count must be set after
11678 * the context is enabled to take effect.
11679 */
11680 /* set interrupt timeout */
11681 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011682 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011683 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11684
11685 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11686 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11687 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11688 }
11689
11690 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11691 /*
11692 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011693		 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11694		 * so it doesn't contain an invalid address.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011695 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011696 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011697 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011698}
11699
Dean Luick582e05c2016-02-18 11:13:01 -080011700u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011701{
11702 int ret;
11703 u64 val = 0;
11704
11705 if (namep) {
11706 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011707 *namep = dd->cntrnames;
11708 } else {
11709 const struct cntr_entry *entry;
11710 int i, j;
11711
11712 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011713
11714 /* Get the start of the block of counters */
11715 *cntrp = dd->cntrs;
11716
11717 /*
11718 * Now go and fill in each counter in the block.
11719 */
11720 for (i = 0; i < DEV_CNTR_LAST; i++) {
11721 entry = &dev_cntrs[i];
11722 hfi1_cdbg(CNTR, "reading %s", entry->name);
11723 if (entry->flags & CNTR_DISABLED) {
11724 /* Nothing */
11725 hfi1_cdbg(CNTR, "\tDisabled\n");
11726 } else {
11727 if (entry->flags & CNTR_VL) {
11728 hfi1_cdbg(CNTR, "\tPer VL\n");
11729 for (j = 0; j < C_VL_COUNT; j++) {
11730 val = entry->rw_cntr(entry,
11731 dd, j,
11732 CNTR_MODE_R,
11733 0);
11734 hfi1_cdbg(
11735 CNTR,
11736 "\t\tRead 0x%llx for %d\n",
11737 val, j);
11738 dd->cntrs[entry->offset + j] =
11739 val;
11740 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011741 } else if (entry->flags & CNTR_SDMA) {
11742 hfi1_cdbg(CNTR,
11743 "\t Per SDMA Engine\n");
11744 for (j = 0; j < dd->chip_sdma_engines;
11745 j++) {
11746 val =
11747 entry->rw_cntr(entry, dd, j,
11748 CNTR_MODE_R, 0);
11749 hfi1_cdbg(CNTR,
11750 "\t\tRead 0x%llx for %d\n",
11751 val, j);
11752 dd->cntrs[entry->offset + j] =
11753 val;
11754 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011755 } else {
11756 val = entry->rw_cntr(entry, dd,
11757 CNTR_INVALID_VL,
11758 CNTR_MODE_R, 0);
11759 dd->cntrs[entry->offset] = val;
11760 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11761 }
11762 }
11763 }
11764 }
11765 return ret;
11766}
11767
11768/*
11769 * Used by sysfs to create files for hfi stats to read
11770 */
Dean Luick582e05c2016-02-18 11:13:01 -080011771u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011772{
11773 int ret;
11774 u64 val = 0;
11775
11776 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011777 ret = ppd->dd->portcntrnameslen;
11778 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011779 } else {
11780 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011781 int i, j;
11782
Dean Luick582e05c2016-02-18 11:13:01 -080011783 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011784 *cntrp = ppd->cntrs;
11785
11786 for (i = 0; i < PORT_CNTR_LAST; i++) {
11787 entry = &port_cntrs[i];
11788 hfi1_cdbg(CNTR, "reading %s", entry->name);
11789 if (entry->flags & CNTR_DISABLED) {
11790 /* Nothing */
11791 hfi1_cdbg(CNTR, "\tDisabled\n");
11792 continue;
11793 }
11794
11795 if (entry->flags & CNTR_VL) {
11796 hfi1_cdbg(CNTR, "\tPer VL");
11797 for (j = 0; j < C_VL_COUNT; j++) {
11798 val = entry->rw_cntr(entry, ppd, j,
11799 CNTR_MODE_R,
11800 0);
11801 hfi1_cdbg(
11802 CNTR,
11803 "\t\tRead 0x%llx for %d",
11804 val, j);
11805 ppd->cntrs[entry->offset + j] = val;
11806 }
11807 } else {
11808 val = entry->rw_cntr(entry, ppd,
11809 CNTR_INVALID_VL,
11810 CNTR_MODE_R,
11811 0);
11812 ppd->cntrs[entry->offset] = val;
11813 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11814 }
11815 }
11816 }
11817 return ret;
11818}
11819
11820static void free_cntrs(struct hfi1_devdata *dd)
11821{
11822 struct hfi1_pportdata *ppd;
11823 int i;
11824
11825 if (dd->synth_stats_timer.data)
11826 del_timer_sync(&dd->synth_stats_timer);
11827 dd->synth_stats_timer.data = 0;
11828 ppd = (struct hfi1_pportdata *)(dd + 1);
11829 for (i = 0; i < dd->num_pports; i++, ppd++) {
11830 kfree(ppd->cntrs);
11831 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011832 free_percpu(ppd->ibport_data.rvp.rc_acks);
11833 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11834 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011835 ppd->cntrs = NULL;
11836 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011837 ppd->ibport_data.rvp.rc_acks = NULL;
11838 ppd->ibport_data.rvp.rc_qacks = NULL;
11839 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011840 }
11841 kfree(dd->portcntrnames);
11842 dd->portcntrnames = NULL;
11843 kfree(dd->cntrs);
11844 dd->cntrs = NULL;
11845 kfree(dd->scntrs);
11846 dd->scntrs = NULL;
11847 kfree(dd->cntrnames);
11848 dd->cntrnames = NULL;
11849}
11850
Mike Marciniszyn77241052015-07-30 15:17:43 -040011851static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11852 u64 *psval, void *context, int vl)
11853{
11854 u64 val;
11855 u64 sval = *psval;
11856
11857 if (entry->flags & CNTR_DISABLED) {
11858 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11859 return 0;
11860 }
11861
11862 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11863
11864 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11865
11866	/* If it's a synthetic counter, there is more work we need to do */
11867 if (entry->flags & CNTR_SYNTH) {
11868 if (sval == CNTR_MAX) {
11869 /* No need to read already saturated */
11870 return CNTR_MAX;
11871 }
11872
11873 if (entry->flags & CNTR_32BIT) {
11874 /* 32bit counters can wrap multiple times */
11875 u64 upper = sval >> 32;
11876 u64 lower = (sval << 32) >> 32;
11877
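			/*
			 * Example: sval 0x100000005 (upper 1, lower 5) with a
			 * new hw read of 3 means the 32-bit counter wrapped;
			 * upper becomes 2 and the result is 0x200000003.
			 */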
11878 if (lower > val) { /* hw wrapped */
11879 if (upper == CNTR_32BIT_MAX)
11880 val = CNTR_MAX;
11881 else
11882 upper++;
11883 }
11884
11885 if (val != CNTR_MAX)
11886 val = (upper << 32) | val;
11887
11888 } else {
11889 /* If we rolled we are saturated */
11890 if ((val < sval) || (val > CNTR_MAX))
11891 val = CNTR_MAX;
11892 }
11893 }
11894
11895 *psval = val;
11896
11897 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11898
11899 return val;
11900}
11901
11902static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11903 struct cntr_entry *entry,
11904 u64 *psval, void *context, int vl, u64 data)
11905{
11906 u64 val;
11907
11908 if (entry->flags & CNTR_DISABLED) {
11909 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11910 return 0;
11911 }
11912
11913 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11914
11915 if (entry->flags & CNTR_SYNTH) {
11916 *psval = data;
11917 if (entry->flags & CNTR_32BIT) {
11918 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11919 (data << 32) >> 32);
11920 val = data; /* return the full 64bit value */
11921 } else {
11922 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11923 data);
11924 }
11925 } else {
11926 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11927 }
11928
11929 *psval = val;
11930
11931 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11932
11933 return val;
11934}
11935
11936u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11937{
11938 struct cntr_entry *entry;
11939 u64 *sval;
11940
11941 entry = &dev_cntrs[index];
11942 sval = dd->scntrs + entry->offset;
11943
11944 if (vl != CNTR_INVALID_VL)
11945 sval += vl;
11946
11947 return read_dev_port_cntr(dd, entry, sval, dd, vl);
11948}
11949
11950u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11951{
11952 struct cntr_entry *entry;
11953 u64 *sval;
11954
11955 entry = &dev_cntrs[index];
11956 sval = dd->scntrs + entry->offset;
11957
11958 if (vl != CNTR_INVALID_VL)
11959 sval += vl;
11960
11961 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11962}
11963
11964u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11965{
11966 struct cntr_entry *entry;
11967 u64 *sval;
11968
11969 entry = &port_cntrs[index];
11970 sval = ppd->scntrs + entry->offset;
11971
11972 if (vl != CNTR_INVALID_VL)
11973 sval += vl;
11974
11975 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11976 (index <= C_RCV_HDR_OVF_LAST)) {
11977 /* We do not want to bother for disabled contexts */
11978 return 0;
11979 }
11980
11981 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11982}
11983
11984u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11985{
11986 struct cntr_entry *entry;
11987 u64 *sval;
11988
11989 entry = &port_cntrs[index];
11990 sval = ppd->scntrs + entry->offset;
11991
11992 if (vl != CNTR_INVALID_VL)
11993 sval += vl;
11994
11995 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11996 (index <= C_RCV_HDR_OVF_LAST)) {
11997 /* We do not want to bother for disabled contexts */
11998 return 0;
11999 }
12000
12001 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12002}
12003
12004static void update_synth_timer(unsigned long opaque)
12005{
12006 u64 cur_tx;
12007 u64 cur_rx;
12008 u64 total_flits;
12009 u8 update = 0;
12010 int i, j, vl;
12011 struct hfi1_pportdata *ppd;
12012 struct cntr_entry *entry;
12013
12014 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12015
12016 /*
12017	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12018	 * check to watch for potential rollover. We can do this by looking at
12019	 * the number of flits sent/received. If the total flits exceeds 32 bits,
12020	 * then we have to iterate over all the counters and update them.
12021 */
12022 entry = &dev_cntrs[C_DC_RCV_FLITS];
12023 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12024
12025 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12026 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12027
12028 hfi1_cdbg(
12029 CNTR,
12030 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12031 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12032
12033 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12034 /*
12035 * May not be strictly necessary to update but it won't hurt and
12036 * simplifies the logic here.
12037 */
12038 update = 1;
12039 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12040 dd->unit);
12041 } else {
12042 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12043 hfi1_cdbg(CNTR,
12044 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12045 total_flits, (u64)CNTR_32BIT_MAX);
12046 if (total_flits >= CNTR_32BIT_MAX) {
12047 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12048 dd->unit);
12049 update = 1;
12050 }
12051 }
12052
12053 if (update) {
12054 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12055 for (i = 0; i < DEV_CNTR_LAST; i++) {
12056 entry = &dev_cntrs[i];
12057 if (entry->flags & CNTR_VL) {
12058 for (vl = 0; vl < C_VL_COUNT; vl++)
12059 read_dev_cntr(dd, i, vl);
12060 } else {
12061 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12062 }
12063 }
12064 ppd = (struct hfi1_pportdata *)(dd + 1);
12065 for (i = 0; i < dd->num_pports; i++, ppd++) {
12066 for (j = 0; j < PORT_CNTR_LAST; j++) {
12067 entry = &port_cntrs[j];
12068 if (entry->flags & CNTR_VL) {
12069 for (vl = 0; vl < C_VL_COUNT; vl++)
12070 read_port_cntr(ppd, j, vl);
12071 } else {
12072 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12073 }
12074 }
12075 }
12076
12077 /*
12078 * We want the value in the register. The goal is to keep track
12079		 * of the number of "ticks", not the counter value. In other
12080		 * words, if the register rolls we want to notice it and go ahead
12081 * and force an update.
12082 */
12083 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12084 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12085 CNTR_MODE_R, 0);
12086
12087 entry = &dev_cntrs[C_DC_RCV_FLITS];
12088 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12089 CNTR_MODE_R, 0);
12090
12091 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12092 dd->unit, dd->last_tx, dd->last_rx);
12093
12094 } else {
12095 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12096 }
12097
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012098 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012099}
12100
Jianxin Xiong09a79082016-10-25 13:12:40 -070012101#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012102static int init_cntrs(struct hfi1_devdata *dd)
12103{
Dean Luickc024c552016-01-11 18:30:57 -050012104 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012105 size_t sz;
12106 char *p;
12107 char name[C_MAX_NAME];
12108 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012109 const char *bit_type_32 = ",32";
12110 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012111
12112 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012113 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12114 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012115
12116 /***********************/
12117 /* per device counters */
12118 /***********************/
12119
12120	/* size names and determine how many we have */
12121 dd->ndevcntrs = 0;
12122 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012123
12124 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012125 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12126 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12127 continue;
12128 }
12129
12130 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012131 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012132 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012133 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012134 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012135 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012136 /* Add ",32" for 32-bit counters */
12137 if (dev_cntrs[i].flags & CNTR_32BIT)
12138 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012139 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012140 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012141 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012142 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012143 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012144 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012145 snprintf(name, C_MAX_NAME, "%s%d",
12146 dev_cntrs[i].name, j);
12147 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012148 /* Add ",32" for 32-bit counters */
12149 if (dev_cntrs[i].flags & CNTR_32BIT)
12150 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012151 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012152 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012153 }
12154 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012155 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012156 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012157 /* Add ",32" for 32-bit counters */
12158 if (dev_cntrs[i].flags & CNTR_32BIT)
12159 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012160 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012161 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012162 }
12163 }
12164
12165 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012166 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012167 if (!dd->cntrs)
12168 goto bail;
12169
Dean Luickc024c552016-01-11 18:30:57 -050012170 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012171 if (!dd->scntrs)
12172 goto bail;
12173
Mike Marciniszyn77241052015-07-30 15:17:43 -040012174 /* allocate space for the counter names */
12175 dd->cntrnameslen = sz;
12176 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12177 if (!dd->cntrnames)
12178 goto bail;
12179
12180 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012181 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012182 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12183 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012184 } else if (dev_cntrs[i].flags & CNTR_VL) {
12185 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012186 snprintf(name, C_MAX_NAME, "%s%d",
12187 dev_cntrs[i].name,
12188 vl_from_idx(j));
12189 memcpy(p, name, strlen(name));
12190 p += strlen(name);
12191
12192 /* Counter is 32 bits */
12193 if (dev_cntrs[i].flags & CNTR_32BIT) {
12194 memcpy(p, bit_type_32, bit_type_32_sz);
12195 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012196 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012197
Mike Marciniszyn77241052015-07-30 15:17:43 -040012198 *p++ = '\n';
12199 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012200 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12201 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012202 snprintf(name, C_MAX_NAME, "%s%d",
12203 dev_cntrs[i].name, j);
12204 memcpy(p, name, strlen(name));
12205 p += strlen(name);
12206
12207 /* Counter is 32 bits */
12208 if (dev_cntrs[i].flags & CNTR_32BIT) {
12209 memcpy(p, bit_type_32, bit_type_32_sz);
12210 p += bit_type_32_sz;
12211 }
12212
12213 *p++ = '\n';
12214 }
12215 } else {
12216 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12217 p += strlen(dev_cntrs[i].name);
12218
12219 /* Counter is 32 bits */
12220 if (dev_cntrs[i].flags & CNTR_32BIT) {
12221 memcpy(p, bit_type_32, bit_type_32_sz);
12222 p += bit_type_32_sz;
12223 }
12224
12225 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012226 }
12227 }
12228
12229 /*********************/
12230 /* per port counters */
12231 /*********************/
12232
12233 /*
12234 * Go through the counters for the overflows and disable the ones we
12235 * don't need. This varies based on platform so we need to do it
12236 * dynamically here.
12237 */
12238 rcv_ctxts = dd->num_rcv_contexts;
12239 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12240 i <= C_RCV_HDR_OVF_LAST; i++) {
12241 port_cntrs[i].flags |= CNTR_DISABLED;
12242 }
12243
 12244	/* size port counter names and determine how many we have */
12245 sz = 0;
12246 dd->nportcntrs = 0;
12247 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012248 if (port_cntrs[i].flags & CNTR_DISABLED) {
12249 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12250 continue;
12251 }
12252
12253 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012254 port_cntrs[i].offset = dd->nportcntrs;
12255 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012256 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012257 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012258 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012259 /* Add ",32" for 32-bit counters */
12260 if (port_cntrs[i].flags & CNTR_32BIT)
12261 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012262 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012263 dd->nportcntrs++;
12264 }
12265 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012266 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012267 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012268 /* Add ",32" for 32-bit counters */
12269 if (port_cntrs[i].flags & CNTR_32BIT)
12270 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012271 port_cntrs[i].offset = dd->nportcntrs;
12272 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012273 }
12274 }
12275
12276 /* allocate space for the counter names */
12277 dd->portcntrnameslen = sz;
12278 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12279 if (!dd->portcntrnames)
12280 goto bail;
12281
12282 /* fill in port cntr names */
12283 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12284 if (port_cntrs[i].flags & CNTR_DISABLED)
12285 continue;
12286
12287 if (port_cntrs[i].flags & CNTR_VL) {
12288 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012289 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012290 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012291 memcpy(p, name, strlen(name));
12292 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012293
12294 /* Counter is 32 bits */
12295 if (port_cntrs[i].flags & CNTR_32BIT) {
12296 memcpy(p, bit_type_32, bit_type_32_sz);
12297 p += bit_type_32_sz;
12298 }
12299
Mike Marciniszyn77241052015-07-30 15:17:43 -040012300 *p++ = '\n';
12301 }
12302 } else {
12303 memcpy(p, port_cntrs[i].name,
12304 strlen(port_cntrs[i].name));
12305 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012306
12307 /* Counter is 32 bits */
12308 if (port_cntrs[i].flags & CNTR_32BIT) {
12309 memcpy(p, bit_type_32, bit_type_32_sz);
12310 p += bit_type_32_sz;
12311 }
12312
Mike Marciniszyn77241052015-07-30 15:17:43 -040012313 *p++ = '\n';
12314 }
12315 }
12316
12317 /* allocate per port storage for counter values */
12318 ppd = (struct hfi1_pportdata *)(dd + 1);
12319 for (i = 0; i < dd->num_pports; i++, ppd++) {
12320 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12321 if (!ppd->cntrs)
12322 goto bail;
12323
12324 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12325 if (!ppd->scntrs)
12326 goto bail;
12327 }
12328
12329 /* CPU counters need to be allocated and zeroed */
12330 if (init_cpu_counters(dd))
12331 goto bail;
12332
12333 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12334 return 0;
12335bail:
12336 free_cntrs(dd);
12337 return -ENOMEM;
12338}
12339
Mike Marciniszyn77241052015-07-30 15:17:43 -040012340static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12341{
12342 switch (chip_lstate) {
12343 default:
12344 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012345 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12346 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012347 /* fall through */
12348 case LSTATE_DOWN:
12349 return IB_PORT_DOWN;
12350 case LSTATE_INIT:
12351 return IB_PORT_INIT;
12352 case LSTATE_ARMED:
12353 return IB_PORT_ARMED;
12354 case LSTATE_ACTIVE:
12355 return IB_PORT_ACTIVE;
12356 }
12357}
12358
12359u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12360{
12361 /* look at the HFI meta-states only */
12362 switch (chip_pstate & 0xf0) {
12363 default:
12364 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012365 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012366 /* fall through */
12367 case PLS_DISABLED:
12368 return IB_PORTPHYSSTATE_DISABLED;
12369 case PLS_OFFLINE:
12370 return OPA_PORTPHYSSTATE_OFFLINE;
12371 case PLS_POLLING:
12372 return IB_PORTPHYSSTATE_POLLING;
12373 case PLS_CONFIGPHY:
12374 return IB_PORTPHYSSTATE_TRAINING;
12375 case PLS_LINKUP:
12376 return IB_PORTPHYSSTATE_LINKUP;
12377 case PLS_PHYTEST:
12378 return IB_PORTPHYSSTATE_PHY_TEST;
12379 }
12380}
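/*
 * Note on the 0xf0 mask above: only the upper nibble (the HFI
 * meta-state) is used to pick the reported physical state; the
 * low-nibble sub-state detail is ignored here.
 */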
12381
12382/* return the OPA port logical state name */
12383const char *opa_lstate_name(u32 lstate)
12384{
12385 static const char * const port_logical_names[] = {
12386 "PORT_NOP",
12387 "PORT_DOWN",
12388 "PORT_INIT",
12389 "PORT_ARMED",
12390 "PORT_ACTIVE",
12391 "PORT_ACTIVE_DEFER",
12392 };
12393 if (lstate < ARRAY_SIZE(port_logical_names))
12394 return port_logical_names[lstate];
12395 return "unknown";
12396}
12397
12398/* return the OPA port physical state name */
12399const char *opa_pstate_name(u32 pstate)
12400{
12401 static const char * const port_physical_names[] = {
12402 "PHYS_NOP",
12403 "reserved1",
12404 "PHYS_POLL",
12405 "PHYS_DISABLED",
12406 "PHYS_TRAINING",
12407 "PHYS_LINKUP",
12408 "PHYS_LINK_ERR_RECOVER",
12409 "PHYS_PHY_TEST",
12410 "reserved8",
12411 "PHYS_OFFLINE",
12412 "PHYS_GANGED",
12413 "PHYS_TEST",
12414 };
12415 if (pstate < ARRAY_SIZE(port_physical_names))
12416 return port_physical_names[pstate];
12417 return "unknown";
12418}
12419
12420/*
12421 * Read the hardware link state and set the driver's cached value of it.
12422 * Return the (new) current value.
12423 */
12424u32 get_logical_state(struct hfi1_pportdata *ppd)
12425{
12426 u32 new_state;
12427
12428 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12429 if (new_state != ppd->lstate) {
12430 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012431 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012432 ppd->lstate = new_state;
12433 }
12434 /*
12435 * Set port status flags in the page mapped into userspace
12436 * memory. Do it here to ensure a reliable state - this is
12437 * the only function called by all state handling code.
 12438	 * Always set the flags because the cached value
12439 * might have been changed explicitly outside of this
12440 * function.
12441 */
12442 if (ppd->statusp) {
12443 switch (ppd->lstate) {
12444 case IB_PORT_DOWN:
12445 case IB_PORT_INIT:
12446 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12447 HFI1_STATUS_IB_READY);
12448 break;
12449 case IB_PORT_ARMED:
12450 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12451 break;
12452 case IB_PORT_ACTIVE:
12453 *ppd->statusp |= HFI1_STATUS_IB_READY;
12454 break;
12455 }
12456 }
12457 return ppd->lstate;
12458}
12459
12460/**
12461 * wait_logical_linkstate - wait for an IB link state change to occur
12462 * @ppd: port device
12463 * @state: the state to wait for
12464 * @msecs: the number of milliseconds to wait
12465 *
 12466	 * Wait up to msecs milliseconds for an IB link state change to occur.
12467 * For now, take the easy polling route.
12468 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12469 */
12470static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12471 int msecs)
12472{
12473 unsigned long timeout;
12474
12475 timeout = jiffies + msecs_to_jiffies(msecs);
12476 while (1) {
12477 if (get_logical_state(ppd) == state)
12478 return 0;
12479 if (time_after(jiffies, timeout))
12480 break;
12481 msleep(20);
12482 }
12483 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12484
12485 return -ETIMEDOUT;
12486}
12487
12488u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12489{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012490 u32 pstate;
12491 u32 ib_pstate;
12492
12493 pstate = read_physical_state(ppd->dd);
12494 ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012495 if (ppd->last_pstate != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012496 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012497 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12498 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12499 pstate);
Dean Luickf45c8dc2016-02-03 14:35:31 -080012500 ppd->last_pstate = ib_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012501 }
12502 return ib_pstate;
12503}
12504
Mike Marciniszyn77241052015-07-30 15:17:43 -040012505#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12506(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12507
12508#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12509(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12510
12511int hfi1_init_ctxt(struct send_context *sc)
12512{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012513 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012514 struct hfi1_devdata *dd = sc->dd;
12515 u64 reg;
12516 u8 set = (sc->type == SC_USER ?
12517 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12518 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12519 reg = read_kctxt_csr(dd, sc->hw_context,
12520 SEND_CTXT_CHECK_ENABLE);
12521 if (set)
12522 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12523 else
12524 SET_STATIC_RATE_CONTROL_SMASK(reg);
12525 write_kctxt_csr(dd, sc->hw_context,
12526 SEND_CTXT_CHECK_ENABLE, reg);
12527 }
12528 return 0;
12529}
12530
12531int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12532{
12533 int ret = 0;
12534 u64 reg;
12535
12536 if (dd->icode != ICODE_RTL_SILICON) {
12537 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12538 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12539 __func__);
12540 return -EINVAL;
12541 }
12542 reg = read_csr(dd, ASIC_STS_THERM);
12543 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12544 ASIC_STS_THERM_CURR_TEMP_MASK);
12545 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12546 ASIC_STS_THERM_LO_TEMP_MASK);
12547 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12548 ASIC_STS_THERM_HI_TEMP_MASK);
12549 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12550 ASIC_STS_THERM_CRIT_TEMP_MASK);
12551 /* triggers is a 3-bit value - 1 bit per trigger. */
12552 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12553
12554 return ret;
12555}
12556
12557/* ========================================================================= */
12558
12559/*
12560 * Enable/disable chip from delivering interrupts.
12561 */
12562void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12563{
12564 int i;
12565
12566 /*
12567 * In HFI, the mask needs to be 1 to allow interrupts.
12568 */
12569 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012570 /* enable all interrupts */
12571 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012572 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012573
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012574 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012575 } else {
12576 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012577 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012578 }
12579}
12580
12581/*
12582 * Clear all interrupt sources on the chip.
12583 */
12584static void clear_all_interrupts(struct hfi1_devdata *dd)
12585{
12586 int i;
12587
12588 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012589 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012590
12591 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12592 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12593 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12594 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12595 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12596 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12597 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12598 for (i = 0; i < dd->chip_send_contexts; i++)
12599 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12600 for (i = 0; i < dd->chip_sdma_engines; i++)
12601 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12602
12603 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12604 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12605 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12606}
12607
12608/* Move to pcie.c? */
12609static void disable_intx(struct pci_dev *pdev)
12610{
12611 pci_intx(pdev, 0);
12612}
12613
12614static void clean_up_interrupts(struct hfi1_devdata *dd)
12615{
12616 int i;
12617
12618 /* remove irqs - must happen before disabling/turning off */
12619 if (dd->num_msix_entries) {
12620 /* MSI-X */
12621 struct hfi1_msix_entry *me = dd->msix_entries;
12622
12623 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012624 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012625 continue;
12626 hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012627 free_irq(me->msix.vector, me->arg);
12628 }
12629 } else {
12630 /* INTx */
12631 if (dd->requested_intx_irq) {
12632 free_irq(dd->pcidev->irq, dd);
12633 dd->requested_intx_irq = 0;
12634 }
12635 }
12636
12637 /* turn off interrupts */
12638 if (dd->num_msix_entries) {
12639 /* MSI-X */
Amitoj Kaur Chawla6e5b6132015-11-01 16:14:32 +053012640 pci_disable_msix(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012641 } else {
12642 /* INTx */
12643 disable_intx(dd->pcidev);
12644 }
12645
12646 /* clean structures */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012647 kfree(dd->msix_entries);
12648 dd->msix_entries = NULL;
12649 dd->num_msix_entries = 0;
12650}
12651
12652/*
12653 * Remap the interrupt source from the general handler to the given MSI-X
12654 * interrupt.
12655 */
12656static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12657{
12658 u64 reg;
12659 int m, n;
12660
12661 /* clear from the handled mask of the general interrupt */
12662 m = isrc / 64;
12663 n = isrc % 64;
12664 dd->gi_mask[m] &= ~((u64)1 << n);
12665
12666 /* direct the chip source to the given MSI-X interrupt */
12667 m = isrc / 8;
12668 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012669 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12670 reg &= ~((u64)0xff << (8 * n));
12671 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12672 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012673}
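/*
 * Worked example for remap_intr(): for chip interrupt source isrc = 70,
 * m = 70 / 64 = 1 and n = 70 % 64 = 6, so bit 6 of gi_mask[1] is
 * cleared (the general handler no longer services that source); then
 * m = 70 / 8 = 8 and n = 70 % 8 = 6, so byte 6 of CceIntMap register 8
 * is set to the chosen MSI-X vector number.
 */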
12674
12675static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12676 int engine, int msix_intr)
12677{
12678 /*
 12679	 * SDMA engine interrupt sources are grouped by type, rather than
 12680	 * by engine. Per-engine interrupts are as follows:
12681 * SDMA
12682 * SDMAProgress
12683 * SDMAIdle
12684 */
Jubin John8638b772016-02-14 20:19:24 -080012685 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012686 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012687 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012688 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012689 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012690 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012691}
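/*
 * Per the grouping above, engine e remaps three chip sources to the
 * same MSI-X vector:
 *   IS_SDMA_START + e                            (SDMA)
 *   IS_SDMA_START + TXE_NUM_SDMA_ENGINES + e     (SDMAProgress)
 *   IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + e (SDMAIdle)
 */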
12692
Mike Marciniszyn77241052015-07-30 15:17:43 -040012693static int request_intx_irq(struct hfi1_devdata *dd)
12694{
12695 int ret;
12696
Jubin John98050712015-11-16 21:59:27 -050012697 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12698 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012699 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012700 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012701 if (ret)
12702 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012703 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012704 else
12705 dd->requested_intx_irq = 1;
12706 return ret;
12707}
12708
12709static int request_msix_irqs(struct hfi1_devdata *dd)
12710{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012711 int first_general, last_general;
12712 int first_sdma, last_sdma;
12713 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012714 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012715
12716 /* calculate the ranges we are going to use */
12717 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012718 last_general = first_general + 1;
12719 first_sdma = last_general;
12720 last_sdma = first_sdma + dd->num_sdma;
12721 first_rx = last_sdma;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012722 last_rx = first_rx + dd->n_krcv_queues;
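	/*
	 * Resulting MSI-X vector layout: vector 0 is the general
	 * ("slow path") interrupt, vectors 1..num_sdma are the SDMA
	 * engines, and the next n_krcv_queues vectors are the kernel
	 * receive contexts.
	 */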
12723
12724 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012725 * Sanity check - the code expects all SDMA chip source
12726 * interrupts to be in the same CSR, starting at bit 0. Verify
12727 * that this is true by checking the bit location of the start.
12728 */
12729 BUILD_BUG_ON(IS_SDMA_START % 64);
12730
12731 for (i = 0; i < dd->num_msix_entries; i++) {
12732 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12733 const char *err_info;
12734 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012735 irq_handler_t thread = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012736 void *arg;
12737 int idx;
12738 struct hfi1_ctxtdata *rcd = NULL;
12739 struct sdma_engine *sde = NULL;
12740
12741 /* obtain the arguments to request_irq */
12742 if (first_general <= i && i < last_general) {
12743 idx = i - first_general;
12744 handler = general_interrupt;
12745 arg = dd;
12746 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012747 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012748 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012749 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012750 } else if (first_sdma <= i && i < last_sdma) {
12751 idx = i - first_sdma;
12752 sde = &dd->per_sdma[idx];
12753 handler = sdma_interrupt;
12754 arg = sde;
12755 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012756 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012757 err_info = "sdma";
12758 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012759 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012760 } else if (first_rx <= i && i < last_rx) {
12761 idx = i - first_rx;
12762 rcd = dd->rcd[idx];
12763 /* no interrupt if no rcd */
12764 if (!rcd)
12765 continue;
12766 /*
12767 * Set the interrupt register and mask for this
12768 * context's interrupt.
12769 */
Jubin John8638b772016-02-14 20:19:24 -080012770 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012771 rcd->imask = ((u64)1) <<
Jubin John8638b772016-02-14 20:19:24 -080012772 ((IS_RCVAVAIL_START + idx) % 64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012773 handler = receive_context_interrupt;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012774 thread = receive_context_thread;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012775 arg = rcd;
12776 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012777 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012778 err_info = "receive context";
Amitoj Kaur Chawla66c09332015-11-01 16:18:18 +053012779 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012780 me->type = IRQ_RCVCTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012781 } else {
12782 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080012783 * ignore it
12784 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012785 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012786 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012787 continue;
12788 }
12789 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080012790 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012791 continue;
12792 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080012793 me->name[sizeof(me->name) - 1] = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012794
Dean Luickf4f30031c2015-10-26 10:28:44 -040012795 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080012796 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012797 if (ret) {
12798 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012799 "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12800 err_info, me->msix.vector, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012801 return ret;
12802 }
12803 /*
12804 * assign arg after request_irq call, so it will be
12805 * cleaned up
12806 */
12807 me->arg = arg;
12808
Mitko Haralanov957558c2016-02-03 14:33:40 -080012809 ret = hfi1_get_irq_affinity(dd, me);
12810 if (ret)
12811 dd_dev_err(dd,
12812 "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012813 }
12814
Mike Marciniszyn77241052015-07-30 15:17:43 -040012815 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012816}
12817
12818/*
12819 * Set the general handler to accept all interrupts, remap all
12820 * chip interrupts back to MSI-X 0.
12821 */
12822static void reset_interrupts(struct hfi1_devdata *dd)
12823{
12824 int i;
12825
12826 /* all interrupts handled by the general handler */
12827 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12828 dd->gi_mask[i] = ~(u64)0;
12829
12830 /* all chip interrupts map to MSI-X 0 */
12831 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012832 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012833}
12834
12835static int set_up_interrupts(struct hfi1_devdata *dd)
12836{
12837 struct hfi1_msix_entry *entries;
12838 u32 total, request;
12839 int i, ret;
12840 int single_interrupt = 0; /* we expect to have all the interrupts */
12841
12842 /*
12843 * Interrupt count:
12844 * 1 general, "slow path" interrupt (includes the SDMA engines
12845 * slow source, SDMACleanupDone)
12846 * N interrupts - one per used SDMA engine
12847 * M interrupt - one per kernel receive context
12848 */
12849 total = 1 + dd->num_sdma + dd->n_krcv_queues;
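	/*
	 * Example with hypothetical sizes: 16 SDMA engines in use and
	 * 8 kernel receive contexts gives total = 1 + 16 + 8 = 25
	 * MSI-X vectors requested.
	 */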
12850
12851 entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12852 if (!entries) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012853 ret = -ENOMEM;
12854 goto fail;
12855 }
12856 /* 1-1 MSI-X entry assignment */
12857 for (i = 0; i < total; i++)
12858 entries[i].msix.entry = i;
12859
12860 /* ask for MSI-X interrupts */
12861 request = total;
12862 request_msix(dd, &request, entries);
12863
12864 if (request == 0) {
12865 /* using INTx */
12866 /* dd->num_msix_entries already zero */
12867 kfree(entries);
12868 single_interrupt = 1;
12869 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12870 } else {
12871 /* using MSI-X */
12872 dd->num_msix_entries = request;
12873 dd->msix_entries = entries;
12874
12875 if (request != total) {
12876 /* using MSI-X, with reduced interrupts */
12877 dd_dev_err(
12878 dd,
12879 "cannot handle reduced interrupt case, want %u, got %u\n",
12880 total, request);
12881 ret = -EINVAL;
12882 goto fail;
12883 }
12884 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12885 }
12886
12887 /* mask all interrupts */
12888 set_intr_state(dd, 0);
12889 /* clear all pending interrupts */
12890 clear_all_interrupts(dd);
12891
12892 /* reset general handler mask, chip MSI-X mappings */
12893 reset_interrupts(dd);
12894
12895 if (single_interrupt)
12896 ret = request_intx_irq(dd);
12897 else
12898 ret = request_msix_irqs(dd);
12899 if (ret)
12900 goto fail;
12901
12902 return 0;
12903
12904fail:
12905 clean_up_interrupts(dd);
12906 return ret;
12907}
12908
12909/*
12910 * Set up context values in dd. Sets:
12911 *
12912 * num_rcv_contexts - number of contexts being used
12913 * n_krcv_queues - number of kernel contexts
12914 * first_user_ctxt - first non-kernel context in array of contexts
12915 * freectxts - number of free user contexts
12916 * num_send_contexts - number of PIO send contexts being used
12917 */
12918static int set_up_context_variables(struct hfi1_devdata *dd)
12919{
Harish Chegondi429b6a72016-08-31 07:24:40 -070012920 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012921 int total_contexts;
12922 int ret;
12923 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070012924 int qos_rmt_count;
12925 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012926
12927 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012928 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012929 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070012930 * - Context 1 - first kernel context
12931 * - Context 2 - second kernel context
12932 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040012933 */
12934 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012935 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070012936 * n_krcvqs is the sum of module parameter kernel receive
12937 * contexts, krcvqs[]. It does not include the control
12938 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050012939 */
Dean Luick33a9eb52016-04-12 10:50:22 -070012940 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012941 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070012942 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012943 /*
12944 * Every kernel receive context needs an ACK send context.
 12945	 * One send context is allocated for each VL{0-7} and VL15.
12946 */
12947 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12948 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070012949 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040012950 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070012951 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012952 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12953 }
12954 /*
Jubin John0852d242016-04-12 11:30:08 -070012955 * User contexts:
12956 * - default to 1 user context per real (non-HT) CPU core if
12957 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040012958 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050012959 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070012960 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070012961 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012962
12963 total_contexts = num_kernel_contexts + num_user_contexts;
12964
12965 /*
12966 * Adjust the counts given a global max.
12967 */
12968 if (total_contexts > dd->chip_rcv_contexts) {
12969 dd_dev_err(dd,
12970 "Reducing # user receive contexts to: %d, from %d\n",
12971 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12972 (int)num_user_contexts);
12973 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12974 /* recalculate */
12975 total_contexts = num_kernel_contexts + num_user_contexts;
12976 }
12977
Dean Luick8f000f72016-04-12 11:32:06 -070012978 /* each user context requires an entry in the RMT */
12979 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12980 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12981 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12982 dd_dev_err(dd,
12983 "RMT size is reducing the number of user receive contexts from %d to %d\n",
12984 (int)num_user_contexts,
12985 user_rmt_reduced);
12986 /* recalculate */
12987 num_user_contexts = user_rmt_reduced;
12988 total_contexts = num_kernel_contexts + num_user_contexts;
12989 }
12990
Mike Marciniszyn77241052015-07-30 15:17:43 -040012991 /* the first N are kernel contexts, the rest are user contexts */
12992 dd->num_rcv_contexts = total_contexts;
12993 dd->n_krcv_queues = num_kernel_contexts;
12994 dd->first_user_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080012995 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012996 dd->freectxts = num_user_contexts;
12997 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012998 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12999 (int)dd->chip_rcv_contexts,
13000 (int)dd->num_rcv_contexts,
13001 (int)dd->n_krcv_queues,
13002 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013003
13004 /*
13005 * Receive array allocation:
13006 * All RcvArray entries are divided into groups of 8. This
13007 * is required by the hardware and will speed up writes to
13008 * consecutive entries by using write-combining of the entire
13009 * cacheline.
13010 *
 13011	 * The groups are evenly divided among all contexts; any
 13012	 * left-over groups are given to the first N user
13013 * contexts.
13014 */
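	/*
	 * Example with hypothetical sizes: 2048 RcvArray entries in
	 * groups of 8 gives 256 groups.  With 32 contexts each context
	 * gets 8 groups and nctxt_extra is 0; with 40 contexts each
	 * gets 6 groups and the 16 left-over groups go to the first
	 * user contexts.
	 */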
13015 dd->rcv_entries.group_size = RCV_INCREMENT;
13016 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13017 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13018 dd->rcv_entries.nctxt_extra = ngroups -
13019 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13020 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13021 dd->rcv_entries.ngroups,
13022 dd->rcv_entries.nctxt_extra);
13023 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13024 MAX_EAGER_ENTRIES * 2) {
13025 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13026 dd->rcv_entries.group_size;
13027 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013028 "RcvArray group count too high, change to %u\n",
13029 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013030 dd->rcv_entries.nctxt_extra = 0;
13031 }
13032 /*
13033 * PIO send contexts
13034 */
13035 ret = init_sc_pools_and_sizes(dd);
13036 if (ret >= 0) { /* success */
13037 dd->num_send_contexts = ret;
13038 dd_dev_info(
13039 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013040 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013041 dd->chip_send_contexts,
13042 dd->num_send_contexts,
13043 dd->sc_sizes[SC_KERNEL].count,
13044 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013045 dd->sc_sizes[SC_USER].count,
13046 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013047 ret = 0; /* success */
13048 }
13049
13050 return ret;
13051}
13052
13053/*
13054 * Set the device/port partition key table. The MAD code
13055 * will ensure that, at least, the partial management
13056 * partition key is present in the table.
13057 */
13058static void set_partition_keys(struct hfi1_pportdata *ppd)
13059{
13060 struct hfi1_devdata *dd = ppd->dd;
13061 u64 reg = 0;
13062 int i;
13063
13064 dd_dev_info(dd, "Setting partition keys\n");
13065 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13066 reg |= (ppd->pkeys[i] &
13067 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13068 ((i % 4) *
13069 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13070 /* Each register holds 4 PKey values. */
13071 if ((i % 4) == 3) {
13072 write_csr(dd, RCV_PARTITION_KEY +
13073 ((i - 3) * 2), reg);
13074 reg = 0;
13075 }
13076 }
13077
13078 /* Always enable HW pkeys check when pkeys table is set */
13079 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13080}
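/*
 * Packing sketch for set_partition_keys(): each 64-bit RcvPartitionKey
 * register holds four PKey fields, so pkeys[i] lands in field i % 4 of
 * register i / 4.  The registers are 8 bytes apart, which is why the
 * write above uses the (i - 3) * 2 byte offset once the fourth field
 * of a group has been filled in.
 */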
13081
13082/*
13083 * These CSRs and memories are uninitialized on reset and must be
13084 * written before reading to set the ECC/parity bits.
13085 *
13086 * NOTE: All user context CSRs that are not mmaped write-only
13087 * (e.g. the TID flows) must be initialized even if the driver never
13088 * reads them.
13089 */
13090static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13091{
13092 int i, j;
13093
13094 /* CceIntMap */
13095 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013096 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013097
13098 /* SendCtxtCreditReturnAddr */
13099 for (i = 0; i < dd->chip_send_contexts; i++)
13100 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13101
13102 /* PIO Send buffers */
13103 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013104 /*
13105 * These are not normally read, and (presently) have no method
13106 * to be read, so are not pre-initialized
13107 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013108
13109 /* RcvHdrAddr */
13110 /* RcvHdrTailAddr */
13111 /* RcvTidFlowTable */
13112 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13113 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13114 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13115 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013116 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013117 }
13118
13119 /* RcvArray */
13120 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080013121 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013122 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013123
13124 /* RcvQPMapTable */
13125 for (i = 0; i < 32; i++)
13126 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13127}
13128
13129/*
13130 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13131 */
13132static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13133 u64 ctrl_bits)
13134{
13135 unsigned long timeout;
13136 u64 reg;
13137
13138 /* is the condition present? */
13139 reg = read_csr(dd, CCE_STATUS);
13140 if ((reg & status_bits) == 0)
13141 return;
13142
13143 /* clear the condition */
13144 write_csr(dd, CCE_CTRL, ctrl_bits);
13145
13146 /* wait for the condition to clear */
13147 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13148 while (1) {
13149 reg = read_csr(dd, CCE_STATUS);
13150 if ((reg & status_bits) == 0)
13151 return;
13152 if (time_after(jiffies, timeout)) {
13153 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013154 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13155 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013156 return;
13157 }
13158 udelay(1);
13159 }
13160}
13161
13162/* set CCE CSRs to chip reset defaults */
13163static void reset_cce_csrs(struct hfi1_devdata *dd)
13164{
13165 int i;
13166
13167 /* CCE_REVISION read-only */
13168 /* CCE_REVISION2 read-only */
13169 /* CCE_CTRL - bits clear automatically */
13170 /* CCE_STATUS read-only, use CceCtrl to clear */
13171 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13172 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13173 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13174 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13175 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13176 /* CCE_ERR_STATUS read-only */
13177 write_csr(dd, CCE_ERR_MASK, 0);
13178 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13179 /* CCE_ERR_FORCE leave alone */
13180 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13181 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13182 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13183 /* CCE_PCIE_CTRL leave alone */
13184 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13185 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13186 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013187 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013188 }
13189 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13190 /* CCE_MSIX_PBA read-only */
13191 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13192 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13193 }
13194 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13195 write_csr(dd, CCE_INT_MAP, 0);
13196 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13197 /* CCE_INT_STATUS read-only */
13198 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13199 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13200 /* CCE_INT_FORCE leave alone */
13201 /* CCE_INT_BLOCKED read-only */
13202 }
13203 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13204 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13205}
13206
Mike Marciniszyn77241052015-07-30 15:17:43 -040013207/* set MISC CSRs to chip reset defaults */
13208static void reset_misc_csrs(struct hfi1_devdata *dd)
13209{
13210 int i;
13211
13212 for (i = 0; i < 32; i++) {
13213 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13214 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13215 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13216 }
Jubin John4d114fd2016-02-14 20:21:43 -080013217 /*
13218 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13219 * only be written 128-byte chunks
13220 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013221 /* init RSA engine to clear lingering errors */
13222 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13223 write_csr(dd, MISC_CFG_RSA_MU, 0);
13224 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13225 /* MISC_STS_8051_DIGEST read-only */
13226 /* MISC_STS_SBM_DIGEST read-only */
13227 /* MISC_STS_PCIE_DIGEST read-only */
13228 /* MISC_STS_FAB_DIGEST read-only */
13229 /* MISC_ERR_STATUS read-only */
13230 write_csr(dd, MISC_ERR_MASK, 0);
13231 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13232 /* MISC_ERR_FORCE leave alone */
13233}
13234
13235/* set TXE CSRs to chip reset defaults */
13236static void reset_txe_csrs(struct hfi1_devdata *dd)
13237{
13238 int i;
13239
13240 /*
13241 * TXE Kernel CSRs
13242 */
13243 write_csr(dd, SEND_CTRL, 0);
13244 __cm_reset(dd, 0); /* reset CM internal state */
13245 /* SEND_CONTEXTS read-only */
13246 /* SEND_DMA_ENGINES read-only */
13247 /* SEND_PIO_MEM_SIZE read-only */
13248 /* SEND_DMA_MEM_SIZE read-only */
13249 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13250 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13251 /* SEND_PIO_ERR_STATUS read-only */
13252 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13253 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13254 /* SEND_PIO_ERR_FORCE leave alone */
13255 /* SEND_DMA_ERR_STATUS read-only */
13256 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13257 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13258 /* SEND_DMA_ERR_FORCE leave alone */
13259 /* SEND_EGRESS_ERR_STATUS read-only */
13260 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13261 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13262 /* SEND_EGRESS_ERR_FORCE leave alone */
13263 write_csr(dd, SEND_BTH_QP, 0);
13264 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13265 write_csr(dd, SEND_SC2VLT0, 0);
13266 write_csr(dd, SEND_SC2VLT1, 0);
13267 write_csr(dd, SEND_SC2VLT2, 0);
13268 write_csr(dd, SEND_SC2VLT3, 0);
13269 write_csr(dd, SEND_LEN_CHECK0, 0);
13270 write_csr(dd, SEND_LEN_CHECK1, 0);
13271 /* SEND_ERR_STATUS read-only */
13272 write_csr(dd, SEND_ERR_MASK, 0);
13273 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13274 /* SEND_ERR_FORCE read-only */
13275 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013276 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013277 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013278 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13279 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13280 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013281 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013282 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013283 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013284 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013285 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013286 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013287 /* SEND_CM_CREDIT_USED_STATUS read-only */
13288 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13289 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13290 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13291 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13292 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13293 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013294 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013295 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13296 /* SEND_CM_CREDIT_USED_VL read-only */
13297 /* SEND_CM_CREDIT_USED_VL15 read-only */
13298 /* SEND_EGRESS_CTXT_STATUS read-only */
13299 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13300 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13301 /* SEND_EGRESS_ERR_INFO read-only */
13302 /* SEND_EGRESS_ERR_SOURCE read-only */
13303
13304 /*
13305 * TXE Per-Context CSRs
13306 */
13307 for (i = 0; i < dd->chip_send_contexts; i++) {
13308 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13309 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13310 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13311 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13312 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13313 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13314 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13315 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13316 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13317 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13318 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13319 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13320 }
13321
13322 /*
13323 * TXE Per-SDMA CSRs
13324 */
13325 for (i = 0; i < dd->chip_sdma_engines; i++) {
13326 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13327 /* SEND_DMA_STATUS read-only */
13328 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13329 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13330 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13331 /* SEND_DMA_HEAD read-only */
13332 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13333 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13334 /* SEND_DMA_IDLE_CNT read-only */
13335 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13336 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13337 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13338 /* SEND_DMA_ENG_ERR_STATUS read-only */
13339 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13340 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13341 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13342 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13343 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13344 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13345 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13346 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13347 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13348 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13349 }
13350}
13351
13352/*
13353 * Expect on entry:
13354 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13355 */
13356static void init_rbufs(struct hfi1_devdata *dd)
13357{
13358 u64 reg;
13359 int count;
13360
13361 /*
13362 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13363 * clear.
13364 */
13365 count = 0;
13366 while (1) {
13367 reg = read_csr(dd, RCV_STATUS);
13368 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13369 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13370 break;
13371 /*
13372 * Give up after 1ms - maximum wait time.
13373 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013374 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013375 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013376		 * 136 KiB / (66% * 250MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013377 */
13378 if (count++ > 500) {
13379 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013380 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13381 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013382 break;
13383 }
13384 udelay(2); /* do not busy-wait the CSR */
13385 }
13386
13387 /* start the init - expect RcvCtrl to be 0 */
13388 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13389
13390 /*
 13391	 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13392 * period after the write before RcvStatus.RxRbufInitDone is valid.
13393 * The delay in the first run through the loop below is sufficient and
 13394	 * required before the first read of RcvStatus.RxRbufInitDone.
13395 */
13396 read_csr(dd, RCV_CTRL);
13397
13398 /* wait for the init to finish */
13399 count = 0;
13400 while (1) {
13401 /* delay is required first time through - see above */
13402 udelay(2); /* do not busy-wait the CSR */
13403 reg = read_csr(dd, RCV_STATUS);
13404 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13405 break;
13406
13407 /* give up after 100us - slowest possible at 33MHz is 73us */
13408 if (count++ > 50) {
13409 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013410 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13411 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013412 break;
13413 }
13414 }
13415}
13416
13417/* set RXE CSRs to chip reset defaults */
13418static void reset_rxe_csrs(struct hfi1_devdata *dd)
13419{
13420 int i, j;
13421
13422 /*
13423 * RXE Kernel CSRs
13424 */
13425 write_csr(dd, RCV_CTRL, 0);
13426 init_rbufs(dd);
13427 /* RCV_STATUS read-only */
13428 /* RCV_CONTEXTS read-only */
13429 /* RCV_ARRAY_CNT read-only */
13430 /* RCV_BUF_SIZE read-only */
13431 write_csr(dd, RCV_BTH_QP, 0);
13432 write_csr(dd, RCV_MULTICAST, 0);
13433 write_csr(dd, RCV_BYPASS, 0);
13434 write_csr(dd, RCV_VL15, 0);
13435 /* this is a clear-down */
13436 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013437 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013438 /* RCV_ERR_STATUS read-only */
13439 write_csr(dd, RCV_ERR_MASK, 0);
13440 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13441 /* RCV_ERR_FORCE leave alone */
13442 for (i = 0; i < 32; i++)
13443 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13444 for (i = 0; i < 4; i++)
13445 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13446 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13447 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13448 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13449 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13450 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13451 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13452 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13453 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13454 }
13455 for (i = 0; i < 32; i++)
13456 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13457
13458 /*
13459 * RXE Kernel and User Per-Context CSRs
13460 */
13461 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13462 /* kernel */
13463 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13464 /* RCV_CTXT_STATUS read-only */
13465 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13466 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13467 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13468 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13469 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13470 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13471 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13472 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13473 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13474 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13475
13476 /* user */
13477 /* RCV_HDR_TAIL read-only */
13478 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13479 /* RCV_EGR_INDEX_TAIL read-only */
13480 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13481 /* RCV_EGR_OFFSET_TAIL read-only */
13482 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013483 write_uctxt_csr(dd, i,
13484 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013485 }
13486 }
13487}
13488
13489/*
13490 * Set sc2vl tables.
13491 *
13492 * They power on to zeros, so to avoid send context errors
13493 * they need to be set:
13494 *
13495 * SC 0-7 -> VL 0-7 (respectively)
13496 * SC 15 -> VL 15
13497 * otherwise
13498 * -> VL 0
13499 */
13500static void init_sc2vl_tables(struct hfi1_devdata *dd)
13501{
13502 int i;
13503 /* init per architecture spec, constrained by hardware capability */
13504
13505 /* HFI maps sent packets */
13506 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13507 0,
13508 0, 0, 1, 1,
13509 2, 2, 3, 3,
13510 4, 4, 5, 5,
13511 6, 6, 7, 7));
13512 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13513 1,
13514 8, 0, 9, 0,
13515 10, 0, 11, 0,
13516 12, 0, 13, 0,
13517 14, 0, 15, 15));
13518 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13519 2,
13520 16, 0, 17, 0,
13521 18, 0, 19, 0,
13522 20, 0, 21, 0,
13523 22, 0, 23, 0));
13524 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13525 3,
13526 24, 0, 25, 0,
13527 26, 0, 27, 0,
13528 28, 0, 29, 0,
13529 30, 0, 31, 0));
13530
13531 /* DC maps received packets */
13532 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13533 15_0,
13534 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13535 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13536 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13537 31_16,
13538 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13539 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13540
13541 /* initialize the cached sc2vl values consistently with h/w */
13542 for (i = 0; i < 32; i++) {
13543 if (i < 8 || i == 15)
13544 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13545 else
13546 *((u8 *)(dd->sc2vl) + i) = 0;
13547 }
13548}
13549
13550/*
 13551 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13552 * depend on the chip going through a power-on reset - a driver may be loaded
13553 * and unloaded many times.
13554 *
13555 * Do not write any CSR values to the chip in this routine - there may be
13556 * a reset following the (possible) FLR in this routine.
13557 *
13558 */
13559static void init_chip(struct hfi1_devdata *dd)
13560{
13561 int i;
13562
13563 /*
13564 * Put the HFI CSRs in a known state.
13565 * Combine this with a DC reset.
13566 *
13567 * Stop the device from doing anything while we do a
13568 * reset. We know there are no other active users of
13569 * the device since we are now in charge. Turn off
 13570	 * all outbound and inbound traffic and make sure
13571 * the device does not generate any interrupts.
13572 */
13573
13574 /* disable send contexts and SDMA engines */
13575 write_csr(dd, SEND_CTRL, 0);
13576 for (i = 0; i < dd->chip_send_contexts; i++)
13577 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13578 for (i = 0; i < dd->chip_sdma_engines; i++)
13579 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13580 /* disable port (turn off RXE inbound traffic) and contexts */
13581 write_csr(dd, RCV_CTRL, 0);
13582 for (i = 0; i < dd->chip_rcv_contexts; i++)
13583 write_csr(dd, RCV_CTXT_CTRL, 0);
13584 /* mask all interrupt sources */
13585 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013586 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013587
13588 /*
13589 * DC Reset: do a full DC reset before the register clear.
13590 * A recommended length of time to hold is one CSR read,
13591 * so reread the CceDcCtrl. Then, hold the DC in reset
13592 * across the clear.
13593 */
13594 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013595 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013596
13597 if (use_flr) {
13598 /*
13599 * A FLR will reset the SPC core and part of the PCIe.
13600 * The parts that need to be restored have already been
13601 * saved.
13602 */
13603 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13604
13605 /* do the FLR, the DC reset will remain */
13606 hfi1_pcie_flr(dd);
13607
13608 /* restore command and BARs */
13609 restore_pci_variables(dd);
13610
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013611 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013612 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13613 hfi1_pcie_flr(dd);
13614 restore_pci_variables(dd);
13615 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013616 } else {
13617 dd_dev_info(dd, "Resetting CSRs with writes\n");
13618 reset_cce_csrs(dd);
13619 reset_txe_csrs(dd);
13620 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013621 reset_misc_csrs(dd);
13622 }
13623 /* clear the DC reset */
13624 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013625
Mike Marciniszyn77241052015-07-30 15:17:43 -040013626 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013627 setextled(dd, 0);
13628
Mike Marciniszyn77241052015-07-30 15:17:43 -040013629 /*
13630 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013631 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013632 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013633	 * holds anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013634 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013635 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013636 * I2CCLK and I2CDAT will change per direction, and INT_N and
13637 * MODPRS_N are input only and their value is ignored.
13638 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013639 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13640 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013641 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013642}
13643
13644static void init_early_variables(struct hfi1_devdata *dd)
13645{
13646 int i;
13647
13648 /* assign link credit variables */
13649 dd->vau = CM_VAU;
13650 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013651 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013652 dd->link_credits--;
13653 dd->vcu = cu_to_vcu(hfi1_cu);
13654 /* enough room for 8 MAD packets plus header - 17K */
13655 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13656 if (dd->vl15_init > dd->link_credits)
13657 dd->vl15_init = dd->link_credits;
13658
13659 write_uninitialized_csrs_and_memories(dd);
13660
13661 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13662 for (i = 0; i < dd->num_pports; i++) {
13663 struct hfi1_pportdata *ppd = &dd->pport[i];
13664
13665 set_partition_keys(ppd);
13666 }
13667 init_sc2vl_tables(dd);
13668}
13669
13670static void init_kdeth_qp(struct hfi1_devdata *dd)
13671{
13672 /* user changed the KDETH_QP */
13673 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13674 /* out of range or illegal value */
13675 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13676 kdeth_qp = 0;
13677 }
13678 if (kdeth_qp == 0) /* not set, or failed range check */
13679 kdeth_qp = DEFAULT_KDETH_QP;
13680
13681 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013682 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13683 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013684
13685 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013686 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13687 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013688}
13689
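/*
 * Illustrative sketch, not driver code: the BTH QP registers written in
 * init_kdeth_qp() hold an 8-bit prefix, which is why values >= 0xff are
 * rejected above.  Assuming the hardware classifies a packet as KDETH
 * when the top byte of its 24-bit BTH destination QPN equals this
 * prefix, a software check would look like the helper below.  The
 * function name is hypothetical.
 */
static inline bool example_is_kdeth_qpn(u32 qpn, uint prefix)
{
	return ((qpn >> 16) & 0xff) == (prefix & 0xff);
}
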
13690/**
13691 * init_qpmap_table
13692 * @dd - device data
13693 * @first_ctxt - first context
 13694 * @last_ctxt - last context
13695 *
 13696 * This routine sets the qpn mapping table that
13697 * is indexed by qpn[8:1].
13698 *
13699 * The routine will round robin the 256 settings
13700 * from first_ctxt to last_ctxt.
13701 *
13702 * The first/last looks ahead to having specialized
13703 * receive contexts for mgmt and bypass. Normal
 13704 * verbs traffic is assumed to be on a range
13705 * of receive contexts.
13706 */
13707static void init_qpmap_table(struct hfi1_devdata *dd,
13708 u32 first_ctxt,
13709 u32 last_ctxt)
13710{
13711 u64 reg = 0;
13712 u64 regno = RCV_QP_MAP_TABLE;
13713 int i;
13714 u64 ctxt = first_ctxt;
13715
Dean Luick60d585ad2016-04-12 10:50:35 -070013716 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013717 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040013718 ctxt++;
13719 if (ctxt > last_ctxt)
13720 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070013721 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013722 write_csr(dd, regno, reg);
13723 reg = 0;
13724 regno += 8;
13725 }
13726 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013727
13728 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13729 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13730}
13731
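/*
 * Worked example for init_qpmap_table() (illustrative only): with
 * first_ctxt = 3 and last_ctxt = 5, the 256 one-byte entries cycle
 * 3, 4, 5, 3, 4, 5, ... and every eight of them are packed into one
 * 64-bit RCV_QP_MAP_TABLE register, lowest entry in the least
 * significant byte:
 *
 *	map[0] = 0x0403050403050403	(entries 0..7)
 *	map[1] = 0x0305040305040305	(entries 8..15), and so on.
 *
 * A packet's qpn[8:1] then selects one of these bytes to pick its
 * receive context.
 */
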
Dean Luick372cc85a2016-04-12 11:30:51 -070013732struct rsm_map_table {
13733 u64 map[NUM_MAP_REGS];
13734 unsigned int used;
13735};
13736
Dean Luickb12349a2016-04-12 11:31:33 -070013737struct rsm_rule_data {
13738 u8 offset;
13739 u8 pkt_type;
13740 u32 field1_off;
13741 u32 field2_off;
13742 u32 index1_off;
13743 u32 index1_width;
13744 u32 index2_off;
13745 u32 index2_width;
13746 u32 mask1;
13747 u32 value1;
13748 u32 mask2;
13749 u32 value2;
13750};
13751
Dean Luick372cc85a2016-04-12 11:30:51 -070013752/*
13753 * Return an initialized RMT map table for users to fill in. OK if it
13754 * returns NULL, indicating no table.
13755 */
13756static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13757{
13758 struct rsm_map_table *rmt;
13759 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
13760
13761 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13762 if (rmt) {
13763 memset(rmt->map, rxcontext, sizeof(rmt->map));
13764 rmt->used = 0;
13765 }
13766
13767 return rmt;
13768}
13769
13770/*
 13771 * Write the final RMT map table to the chip. OK if table is NULL; the
 13772 * caller is responsible for freeing the table.
13773 */
13774static void complete_rsm_map_table(struct hfi1_devdata *dd,
13775 struct rsm_map_table *rmt)
13776{
13777 int i;
13778
13779 if (rmt) {
13780 /* write table to chip */
13781 for (i = 0; i < NUM_MAP_REGS; i++)
13782 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13783
13784 /* enable RSM */
13785 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13786 }
13787}
13788
Dean Luickb12349a2016-04-12 11:31:33 -070013789/*
13790 * Add a receive side mapping rule.
13791 */
13792static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13793 struct rsm_rule_data *rrd)
13794{
13795 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13796 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13797 1ull << rule_index | /* enable bit */
13798 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13799 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13800 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13801 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13802 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13803 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13804 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13805 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13806 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13807 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13808 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13809 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13810 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13811}
13812
Dean Luick4a818be2016-04-12 11:31:11 -070013813/* return the number of RSM map table entries that will be used for QOS */
13814static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13815 unsigned int *np)
13816{
13817 int i;
13818 unsigned int m, n;
13819 u8 max_by_vl = 0;
13820
13821 /* is QOS active at all? */
13822 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13823 num_vls == 1 ||
13824 krcvqsset <= 1)
13825 goto no_qos;
13826
13827 /* determine bits for qpn */
13828 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13829 if (krcvqs[i] > max_by_vl)
13830 max_by_vl = krcvqs[i];
13831 if (max_by_vl > 32)
13832 goto no_qos;
13833 m = ilog2(__roundup_pow_of_two(max_by_vl));
13834
13835 /* determine bits for vl */
13836 n = ilog2(__roundup_pow_of_two(num_vls));
13837
13838 /* reject if too much is used */
13839 if ((m + n) > 7)
13840 goto no_qos;
13841
13842 if (mp)
13843 *mp = m;
13844 if (np)
13845 *np = n;
13846
13847 return 1 << (m + n);
13848
13849no_qos:
13850 if (mp)
13851 *mp = 0;
13852 if (np)
13853 *np = 0;
13854 return 0;
13855}
13856
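/*
 * Worked example for qos_rmt_entries() (illustrative only): with
 * krcvqs = {2, 3, 4} kernel receive queues for VL0..VL2 and num_vls = 3,
 * max_by_vl is 4, so m = ilog2(roundup_pow_of_two(4)) = 2 QPN bits and
 * n = ilog2(roundup_pow_of_two(3)) = 2 VL bits.  Since m + n = 4 <= 7,
 * the function reports 1 << 4 = 16 RSM map table entries for QOS.
 */
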
Mike Marciniszyn77241052015-07-30 15:17:43 -040013857/**
13858 * init_qos - init RX qos
13859 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070013860 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040013861 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013862 * This routine initializes Rule 0 and the RSM map table to implement
13863 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040013864 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013865 * If all of the limit tests succeed, qos is applied based on the array
13866 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013867 *
Dean Luick33a9eb52016-04-12 10:50:22 -070013868 * The number of vl bits (n) and the number of qpn bits (m) are computed to
13869 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013870 */
Dean Luick372cc85a2016-04-12 11:30:51 -070013871static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013872{
Dean Luickb12349a2016-04-12 11:31:33 -070013873 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013874 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070013875 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013876 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013877
Dean Luick4a818be2016-04-12 11:31:11 -070013878 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013879 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013880 rmt_entries = qos_rmt_entries(dd, &m, &n);
13881 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013882 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013883 qpns_per_vl = 1 << m;
13884
Dean Luick372cc85a2016-04-12 11:30:51 -070013885 /* enough room in the map table? */
13886 rmt_entries = 1 << (m + n);
13887 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050013888 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070013889
Dean Luick372cc85a2016-04-12 11:30:51 -070013890 /* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070013891 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013892 unsigned tctxt;
13893
13894 for (qpn = 0, tctxt = ctxt;
13895 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13896 unsigned idx, regoff, regidx;
13897
Dean Luick372cc85a2016-04-12 11:30:51 -070013898 /* generate the index the hardware will produce */
13899 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013900 regoff = (idx % 8) * 8;
13901 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070013902 /* replace default with context number */
13903 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040013904 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13905 << regoff);
13906 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070013907 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013908 if (tctxt == ctxt + krcvqs[i])
13909 tctxt = ctxt;
13910 }
13911 ctxt += krcvqs[i];
13912 }
Dean Luickb12349a2016-04-12 11:31:33 -070013913
13914 rrd.offset = rmt->used;
13915 rrd.pkt_type = 2;
13916 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13917 rrd.field2_off = LRH_SC_MATCH_OFFSET;
13918 rrd.index1_off = LRH_SC_SELECT_OFFSET;
13919 rrd.index1_width = n;
13920 rrd.index2_off = QPN_SELECT_OFFSET;
13921 rrd.index2_width = m + n;
13922 rrd.mask1 = LRH_BTH_MASK;
13923 rrd.value1 = LRH_BTH_VALUE;
13924 rrd.mask2 = LRH_SC_MASK;
13925 rrd.value2 = LRH_SC_VALUE;
13926
13927 /* add rule 0 */
13928 add_rsm_rule(dd, 0, &rrd);
13929
Dean Luick372cc85a2016-04-12 11:30:51 -070013930 /* mark RSM map entries as used */
13931 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070013932 /* map everything else to the mcast/err/vl15 context */
13933 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013934 dd->qos_shift = n + 1;
13935 return;
13936bail:
13937 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013938 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013939}
13940
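/*
 * Small worked example of the map fill in init_qos() (illustrative only,
 * assuming FIRST_KERNEL_KCTXT is 1 and rmt->used is 0): with m = 1,
 * n = 1, num_vls = 2 and krcvqs = {1, 2}, qpns_per_vl is 2 and
 * idx = (qpn << 1) ^ i gives:
 *
 *	VL0 (i = 0, one context):  entries 0 and 2 -> context 1
 *	VL1 (i = 1, two contexts): entry 1 -> context 2, entry 3 -> context 3
 *
 * Rule 0 is then programmed to index with n SC bits (index1) and
 * m + n QPN bits (index2), so, mirroring the software index formula,
 * VL0 traffic should land on context 1 while VL1 traffic is spread over
 * contexts 2 and 3 by the low QPN bit.
 */
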
Dean Luick8f000f72016-04-12 11:32:06 -070013941static void init_user_fecn_handling(struct hfi1_devdata *dd,
13942 struct rsm_map_table *rmt)
13943{
13944 struct rsm_rule_data rrd;
13945 u64 reg;
13946 int i, idx, regoff, regidx;
13947 u8 offset;
13948
13949 /* there needs to be enough room in the map table */
13950 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13951 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13952 return;
13953 }
13954
13955 /*
13956 * RSM will extract the destination context as an index into the
13957 * map table. The destination contexts are a sequential block
13958 * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13959 * Map entries are accessed as offset + extracted value. Adjust
13960 * the added offset so this sequence can be placed anywhere in
13961 * the table - as long as the entries themselves do not wrap.
13962 * There are only enough bits in offset for the table size, so
13963 * start with that to allow for a "negative" offset.
13964 */
13965 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13966 (int)dd->first_user_ctxt);
13967
13968 for (i = dd->first_user_ctxt, idx = rmt->used;
13969 i < dd->num_rcv_contexts; i++, idx++) {
13970 /* replace with identity mapping */
13971 regoff = (idx % 8) * 8;
13972 regidx = idx / 8;
13973 reg = rmt->map[regidx];
13974 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13975 reg |= (u64)i << regoff;
13976 rmt->map[regidx] = reg;
13977 }
13978
13979 /*
13980 * For RSM intercept of Expected FECN packets:
13981 * o packet type 0 - expected
13982 * o match on F (bit 95), using select/match 1, and
13983 * o match on SH (bit 133), using select/match 2.
13984 *
13985 * Use index 1 to extract the 8-bit receive context from DestQP
13986 * (start at bit 64). Use that as the RSM map table index.
13987 */
13988 rrd.offset = offset;
13989 rrd.pkt_type = 0;
13990 rrd.field1_off = 95;
13991 rrd.field2_off = 133;
13992 rrd.index1_off = 64;
13993 rrd.index1_width = 8;
13994 rrd.index2_off = 0;
13995 rrd.index2_width = 0;
13996 rrd.mask1 = 1;
13997 rrd.value1 = 1;
13998 rrd.mask2 = 1;
13999 rrd.value2 = 1;
14000
14001 /* add rule 1 */
14002 add_rsm_rule(dd, 1, &rrd);
14003
14004 rmt->used += dd->num_user_contexts;
14005}
14006
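/*
 * Worked example of the "negative" offset trick in
 * init_user_fecn_handling() (illustrative numbers, assuming a 256-entry
 * map table): with rmt->used = 20 and first_user_ctxt = 8, the 8-bit
 * offset becomes (u8)(256 + 20 - 8) = 12.  For user context 10 the
 * hardware extracts 10 from DestQP and adds the offset, landing on map
 * entry 22 = 20 + (10 - 8), which the loop above programmed with the
 * identity value 10.
 */
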
Mike Marciniszyn77241052015-07-30 15:17:43 -040014007static void init_rxe(struct hfi1_devdata *dd)
14008{
Dean Luick372cc85a2016-04-12 11:30:51 -070014009 struct rsm_map_table *rmt;
14010
Mike Marciniszyn77241052015-07-30 15:17:43 -040014011 /* enable all receive errors */
14012 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014013
14014 rmt = alloc_rsm_map_table(dd);
14015 /* set up QOS, including the QPN map table */
14016 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014017 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014018 complete_rsm_map_table(dd, rmt);
14019 kfree(rmt);
14020
Mike Marciniszyn77241052015-07-30 15:17:43 -040014021 /*
14022 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14023 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14024 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14025 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14026 * Max_PayLoad_Size set to its minimum of 128.
14027 *
14028 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14029 * (64 bytes). Max_Payload_Size is possibly modified upward in
14030 * tune_pcie_caps() which is called after this routine.
14031 */
14032}
14033
14034static void init_other(struct hfi1_devdata *dd)
14035{
14036 /* enable all CCE errors */
14037 write_csr(dd, CCE_ERR_MASK, ~0ull);
14038 /* enable *some* Misc errors */
14039 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14040 /* enable all DC errors, except LCB */
14041 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14042 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14043}
14044
14045/*
14046 * Fill out the given AU table using the given CU. A CU is defined in terms
 14047 * of AUs. The table is an encoding: given the index, how many AUs does that
14048 * represent?
14049 *
14050 * NOTE: Assumes that the register layout is the same for the
14051 * local and remote tables.
14052 */
14053static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14054 u32 csr0to3, u32 csr4to7)
14055{
14056 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014057 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14058 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14059 2ull * cu <<
14060 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14061 4ull * cu <<
14062 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014063 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014064 8ull * cu <<
14065 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14066 16ull * cu <<
14067 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14068 32ull * cu <<
14069 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14070 64ull * cu <<
14071 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014072}
14073
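/*
 * Illustrative decode of the AU table programmed above: the eight
 * entries encode 0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu and 64*cu
 * allocation units.  For example, if vcu_to_cu() yielded cu = 2, the
 * table would read {0, 1, 4, 8, 16, 32, 64, 128} AUs for indices 0-7.
 */
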
14074static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14075{
14076 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014077 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014078}
14079
14080void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14081{
14082 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014083 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014084}
14085
14086static void init_txe(struct hfi1_devdata *dd)
14087{
14088 int i;
14089
14090 /* enable all PIO, SDMA, general, and Egress errors */
14091 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14092 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14093 write_csr(dd, SEND_ERR_MASK, ~0ull);
14094 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14095
14096 /* enable all per-context and per-SDMA engine errors */
14097 for (i = 0; i < dd->chip_send_contexts; i++)
14098 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14099 for (i = 0; i < dd->chip_sdma_engines; i++)
14100 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14101
14102 /* set the local CU to AU mapping */
14103 assign_local_cm_au_table(dd, dd->vcu);
14104
14105 /*
14106 * Set reasonable default for Credit Return Timer
14107 * Don't set on Simulator - causes it to choke.
14108 */
14109 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14110 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14111}
14112
14113int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14114{
14115 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14116 unsigned sctxt;
14117 int ret = 0;
14118 u64 reg;
14119
14120 if (!rcd || !rcd->sc) {
14121 ret = -EINVAL;
14122 goto done;
14123 }
14124 sctxt = rcd->sc->hw_context;
14125 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14126 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14127 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14128 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14129 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14130 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14131 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14132 /*
14133 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014134 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014135 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014136 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14137 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14138 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14139 }
14140
14141 /* Enable J_KEY check on receive context. */
14142 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14143 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14144 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14145 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14146done:
14147 return ret;
14148}
14149
14150int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14151{
14152 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14153 unsigned sctxt;
14154 int ret = 0;
14155 u64 reg;
14156
14157 if (!rcd || !rcd->sc) {
14158 ret = -EINVAL;
14159 goto done;
14160 }
14161 sctxt = rcd->sc->hw_context;
14162 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14163 /*
14164 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14165 * This check would not have been enabled for A0 h/w, see
14166 * set_ctxt_jkey().
14167 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014168 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014169 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14170 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14171 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14172 }
14173 /* Turn off the J_KEY on the receive side */
14174 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14175done:
14176 return ret;
14177}
14178
14179int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14180{
14181 struct hfi1_ctxtdata *rcd;
14182 unsigned sctxt;
14183 int ret = 0;
14184 u64 reg;
14185
Jubin Johne4909742016-02-14 20:22:00 -080014186 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014187 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014188 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014189 ret = -EINVAL;
14190 goto done;
14191 }
14192 if (!rcd || !rcd->sc) {
14193 ret = -EINVAL;
14194 goto done;
14195 }
14196 sctxt = rcd->sc->hw_context;
14197 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14198 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14199 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14200 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14201 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014202 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014203 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14204done:
14205 return ret;
14206}
14207
14208int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14209{
14210 struct hfi1_ctxtdata *rcd;
14211 unsigned sctxt;
14212 int ret = 0;
14213 u64 reg;
14214
Jubin Johne4909742016-02-14 20:22:00 -080014215 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014216 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014217 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014218 ret = -EINVAL;
14219 goto done;
14220 }
14221 if (!rcd || !rcd->sc) {
14222 ret = -EINVAL;
14223 goto done;
14224 }
14225 sctxt = rcd->sc->hw_context;
14226 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14227 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14228 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14229 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14230done:
14231 return ret;
14232}
14233
14234/*
 14235 * Start doing the clean up of the chip. Our clean up happens in multiple
14236 * stages and this is just the first.
14237 */
14238void hfi1_start_cleanup(struct hfi1_devdata *dd)
14239{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014240 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014241 free_cntrs(dd);
14242 free_rcverr(dd);
14243 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014244 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014245}
14246
14247#define HFI_BASE_GUID(dev) \
14248 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14249
14250/*
Dean Luick78eb1292016-03-05 08:49:45 -080014251 * Information can be shared between the two HFIs on the same ASIC
14252 * in the same OS. This function finds the peer device and sets
14253 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014254 */
Dean Luick78eb1292016-03-05 08:49:45 -080014255static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014256{
14257 unsigned long flags;
14258 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014259 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014260 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014261
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014262 /* pre-allocate the asic structure in case we are the first device */
14263 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14264 if (!asic_data)
14265 return -ENOMEM;
14266
Mike Marciniszyn77241052015-07-30 15:17:43 -040014267 spin_lock_irqsave(&hfi1_devs_lock, flags);
14268 /* Find our peer device */
14269 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14270 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14271 dd->unit != tmp->unit) {
14272 peer = tmp;
14273 break;
14274 }
14275 }
14276
Dean Luick78eb1292016-03-05 08:49:45 -080014277 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014278 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014279 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014280 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014281 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014282 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014283 mutex_init(&dd->asic_data->asic_resource_mutex);
14284 }
14285 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014286 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014287
14288 /* first one through - set up i2c devices */
14289 if (!peer)
14290 ret = set_up_i2c(dd, dd->asic_data);
14291
Dean Luick78eb1292016-03-05 08:49:45 -080014292 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014293}
14294
Dean Luick5d9157a2015-11-16 21:59:34 -050014295/*
14296 * Set dd->boardname. Use a generic name if a name is not returned from
14297 * EFI variable space.
14298 *
14299 * Return 0 on success, -ENOMEM if space could not be allocated.
14300 */
14301static int obtain_boardname(struct hfi1_devdata *dd)
14302{
14303 /* generic board description */
14304 const char generic[] =
14305 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14306 unsigned long size;
14307 int ret;
14308
14309 ret = read_hfi1_efi_var(dd, "description", &size,
14310 (void **)&dd->boardname);
14311 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014312 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014313 /* use generic description */
14314 dd->boardname = kstrdup(generic, GFP_KERNEL);
14315 if (!dd->boardname)
14316 return -ENOMEM;
14317 }
14318 return 0;
14319}
14320
Kaike Wan24487dd2016-02-26 13:33:23 -080014321/*
14322 * Check the interrupt registers to make sure that they are mapped correctly.
 14323 * It is intended to help the user identify any mismapping by the VMM when
 14324 * the driver is running in a VM. This function should only be called before
 14325 * interrupts are set up properly.
14326 *
14327 * Return 0 on success, -EINVAL on failure.
14328 */
14329static int check_int_registers(struct hfi1_devdata *dd)
14330{
14331 u64 reg;
14332 u64 all_bits = ~(u64)0;
14333 u64 mask;
14334
14335 /* Clear CceIntMask[0] to avoid raising any interrupts */
14336 mask = read_csr(dd, CCE_INT_MASK);
14337 write_csr(dd, CCE_INT_MASK, 0ull);
14338 reg = read_csr(dd, CCE_INT_MASK);
14339 if (reg)
14340 goto err_exit;
14341
14342 /* Clear all interrupt status bits */
14343 write_csr(dd, CCE_INT_CLEAR, all_bits);
14344 reg = read_csr(dd, CCE_INT_STATUS);
14345 if (reg)
14346 goto err_exit;
14347
14348 /* Set all interrupt status bits */
14349 write_csr(dd, CCE_INT_FORCE, all_bits);
14350 reg = read_csr(dd, CCE_INT_STATUS);
14351 if (reg != all_bits)
14352 goto err_exit;
14353
14354 /* Restore the interrupt mask */
14355 write_csr(dd, CCE_INT_CLEAR, all_bits);
14356 write_csr(dd, CCE_INT_MASK, mask);
14357
14358 return 0;
14359err_exit:
14360 write_csr(dd, CCE_INT_MASK, mask);
14361 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14362 return -EINVAL;
14363}
14364
Mike Marciniszyn77241052015-07-30 15:17:43 -040014365/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014366 * Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014367 * @dev: the pci_dev for hfi1_ib device
14368 * @ent: pci_device_id struct for this dev
14369 *
14370 * Also allocates, initializes, and returns the devdata struct for this
14371 * device instance
14372 *
14373 * This is global, and is called directly at init to set up the
14374 * chip-specific function pointers for later use.
14375 */
14376struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14377 const struct pci_device_id *ent)
14378{
14379 struct hfi1_devdata *dd;
14380 struct hfi1_pportdata *ppd;
14381 u64 reg;
14382 int i, ret;
14383 static const char * const inames[] = { /* implementation names */
14384 "RTL silicon",
14385 "RTL VCS simulation",
14386 "RTL FPGA emulation",
14387 "Functional simulator"
14388 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014389 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014390
Jubin John17fb4f22016-02-14 20:21:52 -080014391 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14392 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014393 if (IS_ERR(dd))
14394 goto bail;
14395 ppd = dd->pport;
14396 for (i = 0; i < dd->num_pports; i++, ppd++) {
14397 int vl;
14398 /* init common fields */
14399 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14400 /* DC supports 4 link widths */
14401 ppd->link_width_supported =
14402 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14403 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14404 ppd->link_width_downgrade_supported =
14405 ppd->link_width_supported;
14406 /* start out enabling only 4X */
14407 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14408 ppd->link_width_downgrade_enabled =
14409 ppd->link_width_downgrade_supported;
14410 /* link width active is 0 when link is down */
14411 /* link width downgrade active is 0 when link is down */
14412
Jubin Johnd0d236e2016-02-14 20:20:15 -080014413 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14414 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014415 hfi1_early_err(&pdev->dev,
14416 "Invalid num_vls %u, using %u VLs\n",
14417 num_vls, HFI1_MAX_VLS_SUPPORTED);
14418 num_vls = HFI1_MAX_VLS_SUPPORTED;
14419 }
14420 ppd->vls_supported = num_vls;
14421 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014422 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014423 /* Set the default MTU. */
14424 for (vl = 0; vl < num_vls; vl++)
14425 dd->vld[vl].mtu = hfi1_max_mtu;
14426 dd->vld[15].mtu = MAX_MAD_PACKET;
14427 /*
14428 * Set the initial values to reasonable default, will be set
14429 * for real when link is up.
14430 */
14431 ppd->lstate = IB_PORT_DOWN;
14432 ppd->overrun_threshold = 0x4;
14433 ppd->phy_error_threshold = 0xf;
14434 ppd->port_crc_mode_enabled = link_crc_mask;
14435 /* initialize supported LTP CRC mode */
14436 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14437 /* initialize enabled LTP CRC mode */
14438 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14439 /* start in offline */
14440 ppd->host_link_state = HLS_DN_OFFLINE;
14441 init_vl_arb_caches(ppd);
Dean Luickf45c8dc2016-02-03 14:35:31 -080014442 ppd->last_pstate = 0xff; /* invalid value */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014443 }
14444
14445 dd->link_default = HLS_DN_POLL;
14446
14447 /*
14448 * Do remaining PCIe setup and save PCIe values in dd.
14449 * Any error printing is already done by the init code.
14450 * On return, we have the chip mapped.
14451 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014452 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014453 if (ret < 0)
14454 goto bail_free;
14455
14456 /* verify that reads actually work, save revision for reset check */
14457 dd->revision = read_csr(dd, CCE_REVISION);
14458 if (dd->revision == ~(u64)0) {
14459 dd_dev_err(dd, "cannot read chip CSRs\n");
14460 ret = -EINVAL;
14461 goto bail_cleanup;
14462 }
14463 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14464 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14465 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14466 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14467
Jubin John4d114fd2016-02-14 20:21:43 -080014468 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014469 * Check interrupt registers mapping if the driver has no access to
14470 * the upstream component. In this case, it is likely that the driver
14471 * is running in a VM.
14472 */
14473 if (!parent) {
14474 ret = check_int_registers(dd);
14475 if (ret)
14476 goto bail_cleanup;
14477 }
14478
14479 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014480 * obtain the hardware ID - NOT related to unit, which is a
14481 * software enumeration
14482 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014483 reg = read_csr(dd, CCE_REVISION2);
14484 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14485 & CCE_REVISION2_HFI_ID_MASK;
14486 /* the variable size will remove unwanted bits */
14487 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14488 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14489 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014490 dd->icode < ARRAY_SIZE(inames) ?
14491 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014492
14493 /* speeds the hardware can support */
14494 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14495 /* speeds allowed to run at */
14496 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14497 /* give a reasonable active value, will be set on link up */
14498 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14499
14500 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14501 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14502 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14503 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14504 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14505 /* fix up link widths for emulation _p */
14506 ppd = dd->pport;
14507 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14508 ppd->link_width_supported =
14509 ppd->link_width_enabled =
14510 ppd->link_width_downgrade_supported =
14511 ppd->link_width_downgrade_enabled =
14512 OPA_LINK_WIDTH_1X;
14513 }
 14514 /* ensure num_vls isn't larger than number of sdma engines */
14515 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14516 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014517 num_vls, dd->chip_sdma_engines);
14518 num_vls = dd->chip_sdma_engines;
14519 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014520 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014521 }
14522
14523 /*
14524 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14525 * Limit the max if larger than the field holds. If timeout is
14526 * non-zero, then the calculated field will be at least 1.
14527 *
14528 * Must be after icode is set up - the cclock rate depends
14529 * on knowing the hardware being used.
14530 */
14531 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14532 if (dd->rcv_intr_timeout_csr >
14533 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14534 dd->rcv_intr_timeout_csr =
14535 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14536 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14537 dd->rcv_intr_timeout_csr = 1;
14538
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014539 /* needs to be done before we look for the peer device */
14540 read_guid(dd);
14541
Dean Luick78eb1292016-03-05 08:49:45 -080014542 /* set up shared ASIC data with peer device */
14543 ret = init_asic_data(dd);
14544 if (ret)
14545 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014546
Mike Marciniszyn77241052015-07-30 15:17:43 -040014547 /* obtain chip sizes, reset chip CSRs */
14548 init_chip(dd);
14549
14550 /* read in the PCIe link speed information */
14551 ret = pcie_speeds(dd);
14552 if (ret)
14553 goto bail_cleanup;
14554
Dean Luicke83eba22016-09-30 04:41:45 -070014555 /* call before get_platform_config(), after init_chip_resources() */
14556 ret = eprom_init(dd);
14557 if (ret)
14558 goto bail_free_rcverr;
14559
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014560 /* Needs to be called before hfi1_firmware_init */
14561 get_platform_config(dd);
14562
Mike Marciniszyn77241052015-07-30 15:17:43 -040014563 /* read in firmware */
14564 ret = hfi1_firmware_init(dd);
14565 if (ret)
14566 goto bail_cleanup;
14567
14568 /*
14569 * In general, the PCIe Gen3 transition must occur after the
14570 * chip has been idled (so it won't initiate any PCIe transactions
14571 * e.g. an interrupt) and before the driver changes any registers
14572 * (the transition will reset the registers).
14573 *
14574 * In particular, place this call after:
14575 * - init_chip() - the chip will not initiate any PCIe transactions
14576 * - pcie_speeds() - reads the current link speed
14577 * - hfi1_firmware_init() - the needed firmware is ready to be
14578 * downloaded
14579 */
14580 ret = do_pcie_gen3_transition(dd);
14581 if (ret)
14582 goto bail_cleanup;
14583
14584 /* start setting dd values and adjusting CSRs */
14585 init_early_variables(dd);
14586
14587 parse_platform_config(dd);
14588
Dean Luick5d9157a2015-11-16 21:59:34 -050014589 ret = obtain_boardname(dd);
14590 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014591 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014592
14593 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014594 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014595 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014596 (u32)dd->majrev,
14597 (u32)dd->minrev,
14598 (dd->revision >> CCE_REVISION_SW_SHIFT)
14599 & CCE_REVISION_SW_MASK);
14600
14601 ret = set_up_context_variables(dd);
14602 if (ret)
14603 goto bail_cleanup;
14604
14605 /* set initial RXE CSRs */
14606 init_rxe(dd);
14607 /* set initial TXE CSRs */
14608 init_txe(dd);
14609 /* set initial non-RXE, non-TXE CSRs */
14610 init_other(dd);
14611 /* set up KDETH QP prefix in both RX and TX CSRs */
14612 init_kdeth_qp(dd);
14613
Dennis Dalessandro41973442016-07-25 07:52:36 -070014614 ret = hfi1_dev_affinity_init(dd);
14615 if (ret)
14616 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080014617
Mike Marciniszyn77241052015-07-30 15:17:43 -040014618 /* send contexts must be set up before receive contexts */
14619 ret = init_send_contexts(dd);
14620 if (ret)
14621 goto bail_cleanup;
14622
14623 ret = hfi1_create_ctxts(dd);
14624 if (ret)
14625 goto bail_cleanup;
14626
14627 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14628 /*
14629 * rcd[0] is guaranteed to be valid by this point. Also, all
 14630 * contexts are using the same value, as per the module parameter.
14631 */
14632 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14633
14634 ret = init_pervl_scs(dd);
14635 if (ret)
14636 goto bail_cleanup;
14637
14638 /* sdma init */
14639 for (i = 0; i < dd->num_pports; ++i) {
14640 ret = sdma_init(dd, i);
14641 if (ret)
14642 goto bail_cleanup;
14643 }
14644
14645 /* use contexts created by hfi1_create_ctxts */
14646 ret = set_up_interrupts(dd);
14647 if (ret)
14648 goto bail_cleanup;
14649
14650 /* set up LCB access - must be after set_up_interrupts() */
14651 init_lcb_access(dd);
14652
Ira Weinyfc0b76c2016-07-27 21:09:40 -040014653 /*
14654 * Serial number is created from the base guid:
14655 * [27:24] = base guid [38:35]
14656 * [23: 0] = base guid [23: 0]
14657 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014658 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040014659 (dd->base_guid & 0xFFFFFF) |
14660 ((dd->base_guid >> 11) & 0xF000000));
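	/*
	 * Sanity note on the shift above: base_guid bits 38:35 shifted
	 * right by 11 land at bits 27:24 of the serial number, matching
	 * the layout described in the comment before the snprintf().
	 */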
Mike Marciniszyn77241052015-07-30 15:17:43 -040014661
14662 dd->oui1 = dd->base_guid >> 56 & 0xFF;
14663 dd->oui2 = dd->base_guid >> 48 & 0xFF;
14664 dd->oui3 = dd->base_guid >> 40 & 0xFF;
14665
14666 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14667 if (ret)
14668 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014669
14670 thermal_init(dd);
14671
14672 ret = init_cntrs(dd);
14673 if (ret)
14674 goto bail_clear_intr;
14675
14676 ret = init_rcverr(dd);
14677 if (ret)
14678 goto bail_free_cntrs;
14679
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070014680 init_completion(&dd->user_comp);
14681
 14682 /* The user refcount starts with one to indicate an active device */
14683 atomic_set(&dd->user_refcount, 1);
14684
Mike Marciniszyn77241052015-07-30 15:17:43 -040014685 goto bail;
14686
14687bail_free_rcverr:
14688 free_rcverr(dd);
14689bail_free_cntrs:
14690 free_cntrs(dd);
14691bail_clear_intr:
14692 clean_up_interrupts(dd);
14693bail_cleanup:
14694 hfi1_pcie_ddcleanup(dd);
14695bail_free:
14696 hfi1_free_devdata(dd);
14697 dd = ERR_PTR(ret);
14698bail:
14699 return dd;
14700}
14701
14702static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14703 u32 dw_len)
14704{
14705 u32 delta_cycles;
14706 u32 current_egress_rate = ppd->current_egress_rate;
14707 /* rates here are in units of 10^6 bits/sec */
14708
14709 if (desired_egress_rate == -1)
14710 return 0; /* shouldn't happen */
14711
14712 if (desired_egress_rate >= current_egress_rate)
 14713 return 0; /* we can't make it go faster, only slower */
14714
14715 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14716 egress_cycles(dw_len * 4, current_egress_rate);
14717
14718 return (u16)delta_cycles;
14719}
14720
Mike Marciniszyn77241052015-07-30 15:17:43 -040014721/**
14722 * create_pbc - build a pbc for transmission
14723 * @flags: special case flags or-ed in built pbc
14724 * @srate: static rate
14725 * @vl: vl
14726 * @dwlen: dword length (header words + data words + pbc words)
14727 *
14728 * Create a PBC with the given flags, rate, VL, and length.
14729 *
14730 * NOTE: The PBC created will not insert any HCRC - all callers but one are
14731 * for verbs, which does not use this PSM feature. The lone other caller
14732 * is for the diagnostic interface which calls this if the user does not
14733 * supply their own PBC.
14734 */
14735u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14736 u32 dw_len)
14737{
14738 u64 pbc, delay = 0;
14739
14740 if (unlikely(srate_mbs))
14741 delay = delay_cycles(ppd, srate_mbs, dw_len);
14742
14743 pbc = flags
14744 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14745 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14746 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14747 | (dw_len & PBC_LENGTH_DWS_MASK)
14748 << PBC_LENGTH_DWS_SHIFT;
14749
14750 return pbc;
14751}
14752
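/*
 * Usage sketch for create_pbc() (illustrative only, never called): build
 * a PBC for a VL0 send with no special flags and no static rate
 * throttling.  The dword count covers the packet header and payload plus
 * the 2-dword PBC itself; the helper name and parameters are
 * hypothetical.
 */
static inline u64 example_build_vl0_pbc(struct hfi1_pportdata *ppd,
					u32 hdr_dwords, u32 data_dwords)
{
	return create_pbc(ppd, 0, 0, 0, hdr_dwords + data_dwords + 2);
}
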
14753#define SBUS_THERMAL 0x4f
14754#define SBUS_THERM_MONITOR_MODE 0x1
14755
14756#define THERM_FAILURE(dev, ret, reason) \
14757 dd_dev_err((dd), \
14758 "Thermal sensor initialization failed: %s (%d)\n", \
14759 (reason), (ret))
14760
14761/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070014762 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014763 *
14764 * After initialization, enable polling of thermal sensor through
 14765 * SBus interface. For this to work, the SBus Master
 14766 * firmware has to be loaded, because the HW polling
 14767 * logic uses SBus interrupts, which are not supported with
14768 * default firmware. Otherwise, no data will be returned through
14769 * the ASIC_STS_THERM CSR.
14770 */
14771static int thermal_init(struct hfi1_devdata *dd)
14772{
14773 int ret = 0;
14774
14775 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080014776 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040014777 return ret;
14778
Dean Luick576531f2016-03-05 08:50:01 -080014779 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
14780 if (ret) {
14781 THERM_FAILURE(dd, ret, "Acquire SBus");
14782 return ret;
14783 }
14784
Mike Marciniszyn77241052015-07-30 15:17:43 -040014785 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050014786 /* Disable polling of thermal readings */
14787 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14788 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014789 /* Thermal Sensor Initialization */
14790 /* Step 1: Reset the Thermal SBus Receiver */
14791 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14792 RESET_SBUS_RECEIVER, 0);
14793 if (ret) {
14794 THERM_FAILURE(dd, ret, "Bus Reset");
14795 goto done;
14796 }
14797 /* Step 2: Set Reset bit in Thermal block */
14798 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14799 WRITE_SBUS_RECEIVER, 0x1);
14800 if (ret) {
14801 THERM_FAILURE(dd, ret, "Therm Block Reset");
14802 goto done;
14803 }
14804 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
14805 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14806 WRITE_SBUS_RECEIVER, 0x32);
14807 if (ret) {
14808 THERM_FAILURE(dd, ret, "Write Clock Div");
14809 goto done;
14810 }
14811 /* Step 4: Select temperature mode */
14812 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14813 WRITE_SBUS_RECEIVER,
14814 SBUS_THERM_MONITOR_MODE);
14815 if (ret) {
14816 THERM_FAILURE(dd, ret, "Write Mode Sel");
14817 goto done;
14818 }
14819 /* Step 5: De-assert block reset and start conversion */
14820 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14821 WRITE_SBUS_RECEIVER, 0x2);
14822 if (ret) {
14823 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14824 goto done;
14825 }
14826 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
14827 msleep(22);
14828
14829 /* Enable polling of thermal readings */
14830 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080014831
14832 /* Set initialized flag */
14833 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
14834 if (ret)
14835 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
14836
Mike Marciniszyn77241052015-07-30 15:17:43 -040014837done:
Dean Luick576531f2016-03-05 08:50:01 -080014838 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014839 return ret;
14840}
14841
14842static void handle_temp_err(struct hfi1_devdata *dd)
14843{
14844 struct hfi1_pportdata *ppd = &dd->pport[0];
14845 /*
14846 * Thermal Critical Interrupt
14847 * Put the device into forced freeze mode, take link down to
14848 * offline, and put DC into reset.
14849 */
14850 dd_dev_emerg(dd,
14851 "Critical temperature reached! Forcing device into freeze mode!\n");
14852 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080014853 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014854 /*
14855 * Shut DC down as much and as quickly as possible.
14856 *
14857 * Step 1: Take the link down to OFFLINE. This will cause the
14858 * 8051 to put the Serdes in reset. However, we don't want to
14859 * go through the entire link state machine since we want to
14860 * shutdown ASAP. Furthermore, this is not a graceful shutdown
14861 * but rather an attempt to save the chip.
14862 * Code below is almost the same as quiet_serdes() but avoids
14863 * all the extra work and the sleeps.
14864 */
14865 ppd->driver_link_ready = 0;
14866 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080014867 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14868 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014869 /*
14870 * Step 2: Shutdown LCB and 8051
14871 * After shutdown, do not restore DC_CFG_RESET value.
14872 */
14873 dc_shutdown(dd);
14874}