/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header byte = 10304 byte
 * 10304 byte / 12.5 GB/s = 824.32ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
        u64 flag;       /* the flag */
        char *str;      /* description string */
        u16 extra;      /* extra information */
        u16 unused0;
        u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

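/*
 * Illustrative sketch (not part of the driver): a flag_table built with
 * FLAG_ENTRY()/FLAG_ENTRY0() is typically walked bit by bit to turn a raw
 * error-status value into readable names.  The helper name below
 * (example_decode_flags) is hypothetical and is shown only to clarify how
 * the tables that follow are meant to be consumed.
 *
 *      static void example_decode_flags(struct flag_table *tbl, size_t n,
 *                                       u64 status)
 *      {
 *              size_t i;
 *
 *              for (i = 0; i < n; i++)
 *                      if (status & tbl[i].flag)
 *                              pr_info("%s\n", tbl[i].str);
 *      }
 */
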
/* Send Error Consequences */
#define SEC_WRITE_DROPPED       0x1
#define SEC_PACKET_DROPPED      0x2
#define SEC_SC_HALTED           0x4     /* per-context only */
#define SEC_SPC_FREEZE          0x8     /* per-HFI only */

#define DEFAULT_KRCVQS          2
#define MIN_KERNEL_KCTXTS       2
#define FIRST_KERNEL_KCTXT      1

/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS           0
#define RSM_INS_FECN            1
#define RSM_INS_VNIC            2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT    39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE          2ull
#define QW_SHIFT                6ull
/* QPN[7..1] */
#define QPN_WIDTH               7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW              0ull
#define LRH_BTH_BIT_OFFSET      48ull
#define LRH_BTH_OFFSET(off)     ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET    LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT_OFFSET   ((LRH_BTH_QW << QW_SHIFT) | (0ull))
#define LRH_BTH_MASK            3ull
#define LRH_BTH_VALUE           2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW               0ull
#define LRH_SC_BIT_OFFSET       56ull
#define LRH_SC_OFFSET(off)      ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET     LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK             128ull
#define LRH_SC_VALUE            0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET    ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET       ((1ull << QW_SHIFT) | (1ull))
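
/*
 * Worked example (derived only from the defines above, not from additional
 * hardware documentation): an RSM match/select offset packs a quadword
 * index in the bits above QW_SHIFT and a bit offset within that quadword
 * in the low bits.  For instance:
 *
 *      LRH_BTH_MATCH_OFFSET = (0ull << 6) | 48 = 48   -> QW 0, bit 48
 *      LRH_SC_MATCH_OFFSET  = (0ull << 6) | 56 = 56   -> QW 0, bit 56
 *      QPN_SELECT_OFFSET    = (1ull << 6) |  1 = 65   -> QW 1, bit 1
 */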

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW              0ull
#define L2_TYPE_BIT_OFFSET      61ull
#define L2_TYPE_OFFSET(off)     ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET    L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK            3ull
#define L2_16B_VALUE            2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW              1ull
#define L4_TYPE_BIT_OFFSET      0ull
#define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK        0xFFull
#define L4_16B_ETH_VALUE        0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET   ((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, \
        sc1, sc1val, \
        sc2, sc2val, \
        sc3, sc3val, \
        sc4, sc4val, \
        sc5, sc5val, \
        sc6, sc6val, \
        sc7, sc7val) \
( \
        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)
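
/*
 * Usage sketch (hypothetical values, for illustration only): SC2VL_VAL()
 * relies on token pasting, so an invocation such as
 *
 *      SC2VL_VAL(0,
 *                0, 0, 1, 1, 2, 2, 3, 3,
 *                4, 4, 5, 5, 6, 6, 7, 7)
 *
 * expands to an OR of terms of the form
 *
 *      ((u64)(0) << SEND_SC2VLT0_SC0_SHIFT) |
 *      ((u64)(1) << SEND_SC2VLT0_SC1_SHIFT) | ...
 *
 * i.e. a single 64-bit value that programs SCs 0-7 of one SC2VL CSR in a
 * single write.  The identity SC-to-VL mapping shown here is only an
 * example, not the mapping the driver actually uses.
 */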

#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, \
        e1, e1val, \
        e2, e2val, \
        e3, e3val, \
        e4, e4val, \
        e5, e5val, \
        e6, e6val, \
        e7, e7val, \
        e8, e8val, \
        e9, e9val, \
        e10, e10val, \
        e11, e11val, \
        e12, e12val, \
        e13, e13val, \
        e14, e14val, \
        e15, e15val) \
( \
        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
                | CCE_STATUS_RXE_FROZE_SMASK \
                | CCE_STATUS_TXE_FROZE_SMASK \
                | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
                | CCE_STATUS_TXE_PAUSED_SMASK \
                | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
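
/*
 * Illustrative sketch (an assumption about intended use, not the driver's
 * actual freeze handling code): the ALL_FROZE / ALL_*_PAUSE masks above
 * are meant to be compared against the CceStatus CSR, along the lines of
 *
 *      u64 cce_status = read_csr(dd, CCE_STATUS);
 *
 *      if ((cce_status & ALL_FROZE) == ALL_FROZE)
 *              ;       // every sub-block reports frozen
 *
 * read_csr() is defined later in this file; CCE_STATUS and the *_SMASK
 * names come from the chip register definitions.
 */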

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CceCsrParityErr",
        CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("CceCsrReadBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("CceCsrWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/ FLAG_ENTRY0("CceTrgtAccessErr",
        CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/ FLAG_ENTRY0("CceRspdDataParityErr",
        CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY0("CceCsrCfgBusParityErr",
        CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/ FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
        CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/ FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY0("PcicRetryMemCorErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/ FLAG_ENTRY0("PcicPostHdQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/ FLAG_ENTRY0("PcicCplDatQCorErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/ FLAG_ENTRY0("PcicNPostHQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/ FLAG_ENTRY0("PcicNPostDatQParityErr",
        CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/ FLAG_ENTRY0("PcicRetryMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/ FLAG_ENTRY0("PcicRetrySotMemUncErr",
        CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/ FLAG_ENTRY0("PcicPostHdQUncErr",
        CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/ FLAG_ENTRY0("PcicPostDatQUncErr",
        CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/ FLAG_ENTRY0("PcicCplHdQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/ FLAG_ENTRY0("PcicCplDatQUncErr",
        CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/ FLAG_ENTRY0("PcicTransmitFrontParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY0("PcicTransmitBackParityErr",
        CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY0("PcicReceiveParityErr",
        CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/ FLAG_ENTRY0("CceTrgtCplTimeoutErr",
        CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/ FLAG_ENTRY0("LATriggered",
        CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/ FLAG_ENTRY0("CceSegReadBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/ FLAG_ENTRY0("CceSegWriteBadAddrErr",
        CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/ FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
        CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
        CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/ FLAG_ENTRY0("CceMsixTableCorErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/ FLAG_ENTRY0("CceMsixTableUncErr",
        CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/ FLAG_ENTRY0("CceIntMapCorErr",
        CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/ FLAG_ENTRY0("CceIntMapUncErr",
        CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/ FLAG_ENTRY0("CceMsixCsrParityErr",
        CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/ FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/ FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/ FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/ FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/ FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/ FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/ FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/ FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/ FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/ FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("PioWriteBadCtxt",
        SEC_WRITE_DROPPED,
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/ FLAG_ENTRY("PioWriteAddrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY("PioCsrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("PioSbMemFifo0",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/ FLAG_ENTRY("PioSbMemFifo1",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/ FLAG_ENTRY("PioPccFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/ FLAG_ENTRY("PioPecFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/ FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/ FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/ FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/ FLAG_ENTRY("PioSmPktResetParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/ FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/ FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/ FLAG_ENTRY("PioVlLenMemBank0Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/ FLAG_ENTRY("PioVlLenMemBank1Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/ FLAG_ENTRY("PioCreditRetFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/ FLAG_ENTRY("PioPpmcPblFifo",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/ FLAG_ENTRY("PioInitSmIn",
        0,
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/ FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/ FLAG_ENTRY("PioHostAddrMemUnc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/ FLAG_ENTRY("PioHostAddrMemCor",
        0,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/ FLAG_ENTRY("PioWriteDataParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/ FLAG_ENTRY("PioStateMachine",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/ FLAG_ENTRY("PioVlfSopParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/ FLAG_ENTRY("PioVlFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/ FLAG_ENTRY("PioPpmcBqcMemParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/ FLAG_ENTRY("PioPpmcSopLen",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/ FLAG_ENTRY("PioCurrentFreeCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/ FLAG_ENTRY("PioLastReturnedCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/ FLAG_ENTRY("PioPccSopHeadParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/ FLAG_ENTRY("PioPecSopHeadParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SDmaRpyTagErr",
        SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/ FLAG_ENTRY0("SDmaCsrParityErr",
        SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/ FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/ FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
        SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
        (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
        | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/ FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/ FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
        SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/ FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/ FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/ FLAG_ENTRY0("TxPioLaunchIntfParityErr",
        SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/ FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
        SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/ FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
        SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/ FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/ FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/ FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/ FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/ FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
        SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/ FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
        SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/ FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
        SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/ FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
        SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/ FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
        SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/ FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
        SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/ FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
        SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/ FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
        SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/ FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
        SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/ FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
        SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/ FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
        SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/ FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
        SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/ FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
        SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/ FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
        SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/ FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
        SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/ FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
        SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/ FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
        SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/ FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
        SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/ FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
        SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/ FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
        SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/ FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
        SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/ FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
        SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/ FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
        SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/ FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
        SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/ FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
        SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/ FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/ FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/ FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/ FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/ FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/ FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/ FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/ FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/ FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/ FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/ FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/ FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/ FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/ FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/ FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/ FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/ FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/ FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/ FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/ FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/ FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/ FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
        SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/ FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
        SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/ FLAG_ENTRY0("Reserved", 0ull),
/* 1*/ FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/ FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/ FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/ FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/ FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/ FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/ FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/ FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/ FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/ FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/ FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/ FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/ FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/ FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/ FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/ FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/ FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/ FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/ FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/ FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/ FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/ FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/ FLAG_ENTRY("InconsistentSop",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/ FLAG_ENTRY("DisallowedPacket",
        SEC_PACKET_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/ FLAG_ENTRY("WriteCrossesBoundary",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/ FLAG_ENTRY("WriteOverflow",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/ FLAG_ENTRY("WriteOutOfBounds",
        SEC_WRITE_DROPPED | SEC_SC_HALTED,
        SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/ FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/ FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/ FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/ FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/ FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/ FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/ FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/ FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/ FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/ FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/ FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/ FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/ FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/ FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/ FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/ FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/ FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
        RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/ FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/ FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/ FLAG_ENTRY0("RxRbufBlockListReadUncErr",
        RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/ FLAG_ENTRY0("RxRbufBlockListReadCorErr",
        RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/ FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
        RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/ FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
        RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/ FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
        RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/ FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
        RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/ FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/ FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/ FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
        RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/ FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/ FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/ FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/ FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/ FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/ FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/ FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/ FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
        RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/ FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
        RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/ FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/ FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/ FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/ FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
        RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/ FLAG_ENTRY0("RxLookupDesPart2ParityErr",
        RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/ FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/ FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/ FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/ FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/ FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/ FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/ FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/ FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/ FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/ FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/ FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/ FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/ FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/ FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/ FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/ FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/ FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/ FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/ FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/ FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/ FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/ FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/ FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/ FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/ FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/ FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
        LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/ FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/ FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/ FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/ FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/ FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/ FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/ FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/ FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/ FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/ FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
        LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/ FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/ FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/ FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/ FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/ FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/ FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
        LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/ FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/ FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/ FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/ FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/ FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/ FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/ FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
        LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/ FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/ FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
        LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/ FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
        LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                    FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
        FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
        FLAG_ENTRY0("External Device Request Timeout",
                    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC SMA message", 0x0002),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
        u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
        u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
        u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
        u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
        u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
        u8 *tx_polarity_inversion,
        u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
        unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
        unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
        unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
        u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
        u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
        int msecs);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
        int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
        unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status;     /* status CSR offset */
        u32 clear;      /* clear CSR offset */
        u32 mask;       /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
          handler, desc }
#define DC_EE1(reg, handler, desc) \
        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
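
/*
 * Expansion example (derived from the macros above): the misc_errs[] table
 * below uses EE(CCE_ERR, handle_cce_err, "CceErr"), which expands to
 *
 *      { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *        handle_cce_err, "CceErr" }
 *
 * so each entry names the status/clear/mask CSR trio plus the handler for
 * one second-tier error register.
 */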

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/ EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/ EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/ EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/ { 0, 0, 0, NULL }, /* reserved */
/* 4*/ EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/ EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/ EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/ EE(SEND_ERR, handle_txe_err, "TxeErr")
        /* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
        EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/ { 0, 0, 0, NULL }, /* PbcInt */
/* 1*/ { 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/ EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/ EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/ { 0, 0, 0, NULL }, /* TCritInt */
        /* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/ DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/ DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/ DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/ /* dc_lbm_int - special, see is_dc_int() */
        /* the rest are reserved */
};

struct cntr_entry {
        /*
         * counter name
         */
        char *name;

        /*
         * csr to read for name (if applicable)
         */
        u64 csr;

        /*
         * offset into dd or ppd to store the counter's value
         */
        int offset;

        /*
         * flags
         */
        u8 flags;

        /*
         * accessor for stat element, context either dd or ppd
         */
        u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
                       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
        name, \
        csr, \
        offset, \
        flags, \
        accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
          (counter * 8 + RCV_COUNTER_ARRAY32), \
          0, flags | CNTR_32BIT, \
          port_access_u32_csr)
1216
1217#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1218CNTR_ELEM(#name, \
1219 (counter * 8 + RCV_COUNTER_ARRAY32), \
1220 0, flags | CNTR_32BIT, \
1221 dev_access_u32_csr)
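
/*
 * For illustration only ("RcvFoo" and the index 5 are hypothetical
 * placeholders, not real counters): with the helpers above,
 *
 *	RXE32_DEV_CNTR_ELEM(RcvFoo, 5, CNTR_NORMAL)
 *
 * expands through CNTR_ELEM() to the cntr_entry initializer
 *
 *	{ "RcvFoo", (5 * 8 + RCV_COUNTER_ARRAY32), 0,
 *	  CNTR_NORMAL | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. a 32-bit device counter whose CSR sits at an 8-byte stride in
 * the RCV_COUNTER_ARRAY32 block.  The 64-bit, TXE and CCE helpers that
 * follow build their entries the same way.
 */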
1222
1223/* 64bit RXE */
1224#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1225CNTR_ELEM(#name, \
1226 (counter * 8 + RCV_COUNTER_ARRAY64), \
1227 0, flags, \
1228 port_access_u64_csr)
1229
1230#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1231CNTR_ELEM(#name, \
1232 (counter * 8 + RCV_COUNTER_ARRAY64), \
1233 0, flags, \
1234 dev_access_u64_csr)
1235
1236#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1237#define OVR_ELM(ctx) \
1238CNTR_ELEM("RcvHdrOvr" #ctx, \
Jubin John8638b772016-02-14 20:19:24 -08001239 (RCV_HDR_OVFL_CNT + ctx * 0x100), \
Mike Marciniszyn77241052015-07-30 15:17:43 -04001240 0, CNTR_NORMAL, port_access_u64_csr)
1241
1242/* 32bit TXE */
1243#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1244CNTR_ELEM(#name, \
1245 (counter * 8 + SEND_COUNTER_ARRAY32), \
1246 0, flags | CNTR_32BIT, \
1247 port_access_u32_csr)
1248
1249/* 64bit TXE */
1250#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1251CNTR_ELEM(#name, \
1252 (counter * 8 + SEND_COUNTER_ARRAY64), \
1253 0, flags, \
1254 port_access_u64_csr)
1255
1256#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1257CNTR_ELEM(#name, \
1258 counter * 8 + SEND_COUNTER_ARRAY64, \
1259 0, \
1260 flags, \
1261 dev_access_u64_csr)
1262
1263/* CCE */
1264#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1265CNTR_ELEM(#name, \
1266 (counter * 8 + CCE_COUNTER_ARRAY32), \
1267 0, flags | CNTR_32BIT, \
1268 dev_access_u32_csr)
1269
1270#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1271CNTR_ELEM(#name, \
1272 (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1273 0, flags | CNTR_32BIT, \
1274 dev_access_u32_csr)
1275
1276/* DC */
1277#define DC_PERF_CNTR(name, counter, flags) \
1278CNTR_ELEM(#name, \
1279 counter, \
1280 0, \
1281 flags, \
1282 dev_access_u64_csr)
1283
1284#define DC_PERF_CNTR_LCB(name, counter, flags) \
1285CNTR_ELEM(#name, \
1286 counter, \
1287 0, \
1288 flags, \
1289 dc_access_lcb_cntr)
1290
1291/* ibp counters */
1292#define SW_IBP_CNTR(name, cntr) \
1293CNTR_ELEM(#name, \
1294 0, \
1295 0, \
1296 CNTR_SYNTH, \
1297 access_ibp_##cntr)
1298
1299u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1300{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001301 if (dd->flags & HFI1_PRESENT) {
Bhaktipriya Shridhar6d210ee2016-02-25 17:22:11 +05301302 return readq((void __iomem *)dd->kregbase + offset);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001303 }
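	/*
	 * Device not present: return all ones (-1 widened to a u64),
	 * which is also what a read from absent or removed PCI hardware
	 * would typically return.
	 */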
1304 return -1;
1305}
1306
1307void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1308{
1309 if (dd->flags & HFI1_PRESENT)
1310 writeq(value, (void __iomem *)dd->kregbase + offset);
1311}
1312
1313void __iomem *get_csr_addr(
1314 struct hfi1_devdata *dd,
1315 u32 offset)
1316{
1317 return (void __iomem *)dd->kregbase + offset;
1318}
1319
1320static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1321 int mode, u64 value)
1322{
1323 u64 ret;
1324
Mike Marciniszyn77241052015-07-30 15:17:43 -04001325 if (mode == CNTR_MODE_R) {
1326 ret = read_csr(dd, csr);
1327 } else if (mode == CNTR_MODE_W) {
1328 write_csr(dd, csr, value);
1329 ret = value;
1330 } else {
1331 dd_dev_err(dd, "Invalid cntr register access mode");
1332 return 0;
1333 }
1334
1335 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1336 return ret;
1337}
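
/*
 * For illustration (hypothetical calls): the CSR-backed counter
 * accessors below all funnel through the helper above.  Reading a
 * counter:
 *
 *	val = read_write_csr(dd, csr, CNTR_MODE_R, 0);
 *
 * Writing one (typically used to clear it):
 *
 *	read_write_csr(dd, csr, CNTR_MODE_W, 0);
 *
 * Any other mode is logged as an error and the call returns 0.
 */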
1338
1339/* Dev Access */
1340static u64 dev_access_u32_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001341 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001342{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301343 struct hfi1_devdata *dd = context;
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001344 u64 csr = entry->csr;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001345
Vennila Megavannana699c6c2016-01-11 18:30:56 -05001346 if (entry->flags & CNTR_SDMA) {
1347 if (vl == CNTR_INVALID_VL)
1348 return 0;
1349 csr += 0x100 * vl;
1350 } else {
1351 if (vl != CNTR_INVALID_VL)
1352 return 0;
1353 }
1354 return read_write_csr(dd, csr, mode, data);
1355}
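
/*
 * Note on the accessor above: for counters flagged CNTR_SDMA, the "vl"
 * argument is effectively the SDMA engine index and the per-engine
 * copies of the CSR are laid out at 0x100-byte strides; counters
 * without the flag only accept vl == CNTR_INVALID_VL.
 */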
1356
1357static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1358 void *context, int idx, int mode, u64 data)
1359{
1360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1361
1362 if (dd->per_sdma && idx < dd->num_sdma)
1363 return dd->per_sdma[idx].err_cnt;
1364 return 0;
1365}
1366
1367static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1368 void *context, int idx, int mode, u64 data)
1369{
1370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1371
1372 if (dd->per_sdma && idx < dd->num_sdma)
1373 return dd->per_sdma[idx].sdma_int_cnt;
1374 return 0;
1375}
1376
1377static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1378 void *context, int idx, int mode, u64 data)
1379{
1380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1381
1382 if (dd->per_sdma && idx < dd->num_sdma)
1383 return dd->per_sdma[idx].idle_int_cnt;
1384 return 0;
1385}
1386
1387static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1388 void *context, int idx, int mode,
1389 u64 data)
1390{
1391 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1392
1393 if (dd->per_sdma && idx < dd->num_sdma)
1394 return dd->per_sdma[idx].progress_int_cnt;
1395 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001396}
1397
1398static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001399 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001400{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301401 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001402
1403 u64 val = 0;
1404 u64 csr = entry->csr;
1405
1406 if (entry->flags & CNTR_VL) {
1407 if (vl == CNTR_INVALID_VL)
1408 return 0;
1409 csr += 8 * vl;
1410 } else {
1411 if (vl != CNTR_INVALID_VL)
1412 return 0;
1413 }
1414
1415 val = read_write_csr(dd, csr, mode, data);
1416 return val;
1417}
1418
1419static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001420 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001421{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301422 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001423 u32 csr = entry->csr;
1424 int ret = 0;
1425
1426 if (vl != CNTR_INVALID_VL)
1427 return 0;
1428 if (mode == CNTR_MODE_R)
1429 ret = read_lcb_csr(dd, csr, &data);
1430 else if (mode == CNTR_MODE_W)
1431 ret = write_lcb_csr(dd, csr, data);
1432
1433 if (ret) {
1434 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1435 return 0;
1436 }
1437
1438 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1439 return data;
1440}
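
/*
 * Note: LCB counters go through read_lcb_csr()/write_lcb_csr(), which
 * can fail when the LCB CSRs are not currently accessible; in that
 * case the failure is logged and the counter reads as 0.
 */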
1441
1442/* Port Access */
1443static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001444 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001445{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301446 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001447
1448 if (vl != CNTR_INVALID_VL)
1449 return 0;
1450 return read_write_csr(ppd->dd, entry->csr, mode, data);
1451}
1452
1453static u64 port_access_u64_csr(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001454 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001455{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301456 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001457 u64 val;
1458 u64 csr = entry->csr;
1459
1460 if (entry->flags & CNTR_VL) {
1461 if (vl == CNTR_INVALID_VL)
1462 return 0;
1463 csr += 8 * vl;
1464 } else {
1465 if (vl != CNTR_INVALID_VL)
1466 return 0;
1467 }
1468 val = read_write_csr(ppd->dd, csr, mode, data);
1469 return val;
1470}
1471
1472/* Software defined */
1473static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1474 u64 data)
1475{
1476 u64 ret;
1477
1478 if (mode == CNTR_MODE_R) {
1479 ret = *cntr;
1480 } else if (mode == CNTR_MODE_W) {
1481 *cntr = data;
1482 ret = data;
1483 } else {
1484 dd_dev_err(dd, "Invalid cntr sw access mode");
1485 return 0;
1486 }
1487
1488 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1489
1490 return ret;
1491}
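
/*
 * For illustration: the "software" counters below are plain u64 fields
 * in dd or ppd.  A read returns the stored value and a write stores
 * "data" verbatim; the counter framework normally writes 0 to reset a
 * counter.
 */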
1492
1493static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001494 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001495{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301496 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001497
1498 if (vl != CNTR_INVALID_VL)
1499 return 0;
1500 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1501}
1502
1503static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
Jubin John17fb4f22016-02-14 20:21:52 -08001504 int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001505{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301506 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001507
1508 if (vl != CNTR_INVALID_VL)
1509 return 0;
1510 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1511}
1512
Dean Luick6d014532015-12-01 15:38:23 -05001513static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1514 void *context, int vl, int mode,
1515 u64 data)
1516{
1517 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1518
1519 if (vl != CNTR_INVALID_VL)
1520 return 0;
1521 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1522}
1523
Mike Marciniszyn77241052015-07-30 15:17:43 -04001524static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001525 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001526{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001527 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1528 u64 zero = 0;
1529 u64 *counter;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001530
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001531 if (vl == CNTR_INVALID_VL)
1532 counter = &ppd->port_xmit_discards;
1533 else if (vl >= 0 && vl < C_VL_COUNT)
1534 counter = &ppd->port_xmit_discards_vl[vl];
1535 else
1536 counter = &zero;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001537
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08001538 return read_write_sw(ppd->dd, counter, mode, data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04001539}
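
/*
 * Note on the accessor above: vl == CNTR_INVALID_VL selects the
 * port-wide discard counter, a VL in range selects the per-VL counter,
 * and any other index is routed to a throwaway local so the access is
 * effectively a no-op.
 */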
1540
1541static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001542 void *context, int vl, int mode,
1543 u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001544{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301545 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001546
1547 if (vl != CNTR_INVALID_VL)
1548 return 0;
1549
1550 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1551 mode, data);
1552}
1553
1554static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001555 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001556{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301557 struct hfi1_pportdata *ppd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001558
1559 if (vl != CNTR_INVALID_VL)
1560 return 0;
1561
1562 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1563 mode, data);
1564}
1565
1566u64 get_all_cpu_total(u64 __percpu *cntr)
1567{
1568 int cpu;
1569 u64 counter = 0;
1570
1571 for_each_possible_cpu(cpu)
1572 counter += *per_cpu_ptr(cntr, cpu);
1573 return counter;
1574}
1575
1576static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1577 u64 __percpu *cntr,
1578 int vl, int mode, u64 data)
1579{
Mike Marciniszyn77241052015-07-30 15:17:43 -04001580 u64 ret = 0;
1581
1582 if (vl != CNTR_INVALID_VL)
1583 return 0;
1584
1585 if (mode == CNTR_MODE_R) {
1586 ret = get_all_cpu_total(cntr) - *z_val;
1587 } else if (mode == CNTR_MODE_W) {
1588 /* A write can only zero the counter */
1589 if (data == 0)
1590 *z_val = get_all_cpu_total(cntr);
1591 else
1592 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1593 } else {
1594 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1595 return 0;
1596 }
1597
1598 return ret;
1599}
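
/*
 * For illustration: per-CPU counters are never cleared in place.
 * "Zeroing" one (a write of 0) snapshots the current all-CPU total
 * into *z_val, and later reads report get_all_cpu_total() - *z_val,
 * so the counter appears to restart from zero without touching the
 * per-CPU variables themselves.
 */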
1600
1601static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1602 void *context, int vl, int mode, u64 data)
1603{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301604 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001605
1606 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1607 mode, data);
1608}
1609
1610static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001611 void *context, int vl, int mode, u64 data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04001612{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301613 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001614
1615 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1616 mode, data);
1617}
1618
1619static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1620 void *context, int vl, int mode, u64 data)
1621{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301622 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001623
1624 return dd->verbs_dev.n_piowait;
1625}
1626
Mike Marciniszyn14553ca2016-02-14 12:45:36 -08001627static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1628 void *context, int vl, int mode, u64 data)
1629{
1630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1631
1632 return dd->verbs_dev.n_piodrain;
1633}
1634
Mike Marciniszyn77241052015-07-30 15:17:43 -04001635static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1636 void *context, int vl, int mode, u64 data)
1637{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301638 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001639
1640 return dd->verbs_dev.n_txwait;
1641}
1642
1643static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1644 void *context, int vl, int mode, u64 data)
1645{
Shraddha Barkea787bde2015-10-15 00:58:29 +05301646 struct hfi1_devdata *dd = context;
Mike Marciniszyn77241052015-07-30 15:17:43 -04001647
1648 return dd->verbs_dev.n_kmem_wait;
1649}
1650
Dean Luickb4219222015-10-26 10:28:35 -04001651static u64 access_sw_send_schedule(const struct cntr_entry *entry,
Jubin John17fb4f22016-02-14 20:21:52 -08001652 void *context, int vl, int mode, u64 data)
Dean Luickb4219222015-10-26 10:28:35 -04001653{
1654 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1655
Vennila Megavannan89abfc82016-02-03 14:34:07 -08001656 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1657 mode, data);
Dean Luickb4219222015-10-26 10:28:35 -04001658}
1659
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05001660/* Software counters for the error status bits within MISC_ERR_STATUS */
1661static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1662 void *context, int vl, int mode,
1663 u64 data)
1664{
1665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1666
1667 return dd->misc_err_status_cnt[12];
1668}
1669
1670static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1671 void *context, int vl, int mode,
1672 u64 data)
1673{
1674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1675
1676 return dd->misc_err_status_cnt[11];
1677}
1678
1679static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1680 void *context, int vl, int mode,
1681 u64 data)
1682{
1683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685 return dd->misc_err_status_cnt[10];
1686}
1687
1688static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1689 void *context, int vl,
1690 int mode, u64 data)
1691{
1692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694 return dd->misc_err_status_cnt[9];
1695}
1696
1697static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1698 void *context, int vl, int mode,
1699 u64 data)
1700{
1701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1702
1703 return dd->misc_err_status_cnt[8];
1704}
1705
1706static u64 access_misc_efuse_read_bad_addr_err_cnt(
1707 const struct cntr_entry *entry,
1708 void *context, int vl, int mode, u64 data)
1709{
1710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1711
1712 return dd->misc_err_status_cnt[7];
1713}
1714
1715static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1716 void *context, int vl,
1717 int mode, u64 data)
1718{
1719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1720
1721 return dd->misc_err_status_cnt[6];
1722}
1723
1724static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1725 void *context, int vl, int mode,
1726 u64 data)
1727{
1728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729
1730 return dd->misc_err_status_cnt[5];
1731}
1732
1733static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1734 void *context, int vl, int mode,
1735 u64 data)
1736{
1737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738
1739 return dd->misc_err_status_cnt[4];
1740}
1741
1742static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1743 void *context, int vl,
1744 int mode, u64 data)
1745{
1746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747
1748 return dd->misc_err_status_cnt[3];
1749}
1750
1751static u64 access_misc_csr_write_bad_addr_err_cnt(
1752 const struct cntr_entry *entry,
1753 void *context, int vl, int mode, u64 data)
1754{
1755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756
1757 return dd->misc_err_status_cnt[2];
1758}
1759
1760static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1761 void *context, int vl,
1762 int mode, u64 data)
1763{
1764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1765
1766 return dd->misc_err_status_cnt[1];
1767}
1768
1769static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1770 void *context, int vl, int mode,
1771 u64 data)
1772{
1773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1774
1775 return dd->misc_err_status_cnt[0];
1776}
1777
1778/*
1779 * Software counter for the aggregate of
1780 * individual CceErrStatus counters
1781 */
1782static u64 access_sw_cce_err_status_aggregated_cnt(
1783 const struct cntr_entry *entry,
1784 void *context, int vl, int mode, u64 data)
1785{
1786 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788 return dd->sw_cce_err_status_aggregate;
1789}
1790
1791/*
1792 * Software counters corresponding to each of the
1793 * error status bits within CceErrStatus
1794 */
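/*
 * A note on the numbering used below: each accessor returns one slot
 * of dd->cce_err_status_cnt[], and the slot index appears to match the
 * bit position of the corresponding error in the CceErrStatus CSR.
 * The RcvErrStatus, SendPioErrStatus, SendDmaErrStatus and
 * SendEgressErrStatus groups that follow use the same convention.
 */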
1795static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1796 void *context, int vl, int mode,
1797 u64 data)
1798{
1799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801 return dd->cce_err_status_cnt[40];
1802}
1803
1804static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1805 void *context, int vl, int mode,
1806 u64 data)
1807{
1808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810 return dd->cce_err_status_cnt[39];
1811}
1812
1813static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1814 void *context, int vl, int mode,
1815 u64 data)
1816{
1817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819 return dd->cce_err_status_cnt[38];
1820}
1821
1822static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1823 void *context, int vl, int mode,
1824 u64 data)
1825{
1826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828 return dd->cce_err_status_cnt[37];
1829}
1830
1831static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1832 void *context, int vl, int mode,
1833 u64 data)
1834{
1835 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836
1837 return dd->cce_err_status_cnt[36];
1838}
1839
1840static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1841 const struct cntr_entry *entry,
1842 void *context, int vl, int mode, u64 data)
1843{
1844 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1845
1846 return dd->cce_err_status_cnt[35];
1847}
1848
1849static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1850 const struct cntr_entry *entry,
1851 void *context, int vl, int mode, u64 data)
1852{
1853 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1854
1855 return dd->cce_err_status_cnt[34];
1856}
1857
1858static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1859 void *context, int vl,
1860 int mode, u64 data)
1861{
1862 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1863
1864 return dd->cce_err_status_cnt[33];
1865}
1866
1867static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1868 void *context, int vl, int mode,
1869 u64 data)
1870{
1871 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1872
1873 return dd->cce_err_status_cnt[32];
1874}
1875
1876static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1877 void *context, int vl, int mode, u64 data)
1878{
1879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881 return dd->cce_err_status_cnt[31];
1882}
1883
1884static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1885 void *context, int vl, int mode,
1886 u64 data)
1887{
1888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890 return dd->cce_err_status_cnt[30];
1891}
1892
1893static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1894 void *context, int vl, int mode,
1895 u64 data)
1896{
1897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899 return dd->cce_err_status_cnt[29];
1900}
1901
1902static u64 access_pcic_transmit_back_parity_err_cnt(
1903 const struct cntr_entry *entry,
1904 void *context, int vl, int mode, u64 data)
1905{
1906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908 return dd->cce_err_status_cnt[28];
1909}
1910
1911static u64 access_pcic_transmit_front_parity_err_cnt(
1912 const struct cntr_entry *entry,
1913 void *context, int vl, int mode, u64 data)
1914{
1915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917 return dd->cce_err_status_cnt[27];
1918}
1919
1920static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1921 void *context, int vl, int mode,
1922 u64 data)
1923{
1924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926 return dd->cce_err_status_cnt[26];
1927}
1928
1929static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1930 void *context, int vl, int mode,
1931 u64 data)
1932{
1933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934
1935 return dd->cce_err_status_cnt[25];
1936}
1937
1938static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1939 void *context, int vl, int mode,
1940 u64 data)
1941{
1942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943
1944 return dd->cce_err_status_cnt[24];
1945}
1946
1947static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1948 void *context, int vl, int mode,
1949 u64 data)
1950{
1951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952
1953 return dd->cce_err_status_cnt[23];
1954}
1955
1956static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1957 void *context, int vl,
1958 int mode, u64 data)
1959{
1960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961
1962 return dd->cce_err_status_cnt[22];
1963}
1964
1965static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1966 void *context, int vl, int mode,
1967 u64 data)
1968{
1969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970
1971 return dd->cce_err_status_cnt[21];
1972}
1973
1974static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1975 const struct cntr_entry *entry,
1976 void *context, int vl, int mode, u64 data)
1977{
1978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979
1980 return dd->cce_err_status_cnt[20];
1981}
1982
1983static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1984 void *context, int vl,
1985 int mode, u64 data)
1986{
1987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988
1989 return dd->cce_err_status_cnt[19];
1990}
1991
1992static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1993 void *context, int vl, int mode,
1994 u64 data)
1995{
1996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997
1998 return dd->cce_err_status_cnt[18];
1999}
2000
2001static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2002 void *context, int vl, int mode,
2003 u64 data)
2004{
2005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006
2007 return dd->cce_err_status_cnt[17];
2008}
2009
2010static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2011 void *context, int vl, int mode,
2012 u64 data)
2013{
2014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015
2016 return dd->cce_err_status_cnt[16];
2017}
2018
2019static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2020 void *context, int vl, int mode,
2021 u64 data)
2022{
2023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024
2025 return dd->cce_err_status_cnt[15];
2026}
2027
2028static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2029 void *context, int vl,
2030 int mode, u64 data)
2031{
2032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033
2034 return dd->cce_err_status_cnt[14];
2035}
2036
2037static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2038 void *context, int vl, int mode,
2039 u64 data)
2040{
2041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042
2043 return dd->cce_err_status_cnt[13];
2044}
2045
2046static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2047 const struct cntr_entry *entry,
2048 void *context, int vl, int mode, u64 data)
2049{
2050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051
2052 return dd->cce_err_status_cnt[12];
2053}
2054
2055static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2056 const struct cntr_entry *entry,
2057 void *context, int vl, int mode, u64 data)
2058{
2059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060
2061 return dd->cce_err_status_cnt[11];
2062}
2063
2064static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2065 const struct cntr_entry *entry,
2066 void *context, int vl, int mode, u64 data)
2067{
2068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069
2070 return dd->cce_err_status_cnt[10];
2071}
2072
2073static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2074 const struct cntr_entry *entry,
2075 void *context, int vl, int mode, u64 data)
2076{
2077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078
2079 return dd->cce_err_status_cnt[9];
2080}
2081
2082static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2083 const struct cntr_entry *entry,
2084 void *context, int vl, int mode, u64 data)
2085{
2086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087
2088 return dd->cce_err_status_cnt[8];
2089}
2090
2091static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2092 void *context, int vl,
2093 int mode, u64 data)
2094{
2095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096
2097 return dd->cce_err_status_cnt[7];
2098}
2099
2100static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2101 const struct cntr_entry *entry,
2102 void *context, int vl, int mode, u64 data)
2103{
2104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105
2106 return dd->cce_err_status_cnt[6];
2107}
2108
2109static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2110 void *context, int vl, int mode,
2111 u64 data)
2112{
2113 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114
2115 return dd->cce_err_status_cnt[5];
2116}
2117
2118static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2119 void *context, int vl, int mode,
2120 u64 data)
2121{
2122 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123
2124 return dd->cce_err_status_cnt[4];
2125}
2126
2127static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2128 const struct cntr_entry *entry,
2129 void *context, int vl, int mode, u64 data)
2130{
2131 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133 return dd->cce_err_status_cnt[3];
2134}
2135
2136static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2137 void *context, int vl,
2138 int mode, u64 data)
2139{
2140 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142 return dd->cce_err_status_cnt[2];
2143}
2144
2145static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2146 void *context, int vl,
2147 int mode, u64 data)
2148{
2149 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151 return dd->cce_err_status_cnt[1];
2152}
2153
2154static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2155 void *context, int vl, int mode,
2156 u64 data)
2157{
2158 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160 return dd->cce_err_status_cnt[0];
2161}
2162
2163/*
2164 * Software counters corresponding to each of the
2165 * error status bits within RcvErrStatus
2166 */
2167static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2168 void *context, int vl, int mode,
2169 u64 data)
2170{
2171 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2172
2173 return dd->rcv_err_status_cnt[63];
2174}
2175
2176static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2177 void *context, int vl,
2178 int mode, u64 data)
2179{
2180 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2181
2182 return dd->rcv_err_status_cnt[62];
2183}
2184
2185static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2186 void *context, int vl, int mode,
2187 u64 data)
2188{
2189 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2190
2191 return dd->rcv_err_status_cnt[61];
2192}
2193
2194static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2195 void *context, int vl, int mode,
2196 u64 data)
2197{
2198 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2199
2200 return dd->rcv_err_status_cnt[60];
2201}
2202
2203static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2204 void *context, int vl,
2205 int mode, u64 data)
2206{
2207 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2208
2209 return dd->rcv_err_status_cnt[59];
2210}
2211
2212static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2213 void *context, int vl,
2214 int mode, u64 data)
2215{
2216 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2217
2218 return dd->rcv_err_status_cnt[58];
2219}
2220
2221static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2222 void *context, int vl, int mode,
2223 u64 data)
2224{
2225 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2226
2227 return dd->rcv_err_status_cnt[57];
2228}
2229
2230static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2231 void *context, int vl, int mode,
2232 u64 data)
2233{
2234 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235
2236 return dd->rcv_err_status_cnt[56];
2237}
2238
2239static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2240 void *context, int vl, int mode,
2241 u64 data)
2242{
2243 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244
2245 return dd->rcv_err_status_cnt[55];
2246}
2247
2248static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2249 const struct cntr_entry *entry,
2250 void *context, int vl, int mode, u64 data)
2251{
2252 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253
2254 return dd->rcv_err_status_cnt[54];
2255}
2256
2257static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2258 const struct cntr_entry *entry,
2259 void *context, int vl, int mode, u64 data)
2260{
2261 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262
2263 return dd->rcv_err_status_cnt[53];
2264}
2265
2266static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2267 void *context, int vl,
2268 int mode, u64 data)
2269{
2270 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271
2272 return dd->rcv_err_status_cnt[52];
2273}
2274
2275static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2276 void *context, int vl,
2277 int mode, u64 data)
2278{
2279 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280
2281 return dd->rcv_err_status_cnt[51];
2282}
2283
2284static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2285 void *context, int vl,
2286 int mode, u64 data)
2287{
2288 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289
2290 return dd->rcv_err_status_cnt[50];
2291}
2292
2293static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2294 void *context, int vl,
2295 int mode, u64 data)
2296{
2297 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298
2299 return dd->rcv_err_status_cnt[49];
2300}
2301
2302static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2303 void *context, int vl,
2304 int mode, u64 data)
2305{
2306 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307
2308 return dd->rcv_err_status_cnt[48];
2309}
2310
2311static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2312 void *context, int vl,
2313 int mode, u64 data)
2314{
2315 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316
2317 return dd->rcv_err_status_cnt[47];
2318}
2319
2320static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2321 void *context, int vl, int mode,
2322 u64 data)
2323{
2324 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325
2326 return dd->rcv_err_status_cnt[46];
2327}
2328
2329static u64 access_rx_hq_intr_csr_parity_err_cnt(
2330 const struct cntr_entry *entry,
2331 void *context, int vl, int mode, u64 data)
2332{
2333 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334
2335 return dd->rcv_err_status_cnt[45];
2336}
2337
2338static u64 access_rx_lookup_csr_parity_err_cnt(
2339 const struct cntr_entry *entry,
2340 void *context, int vl, int mode, u64 data)
2341{
2342 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343
2344 return dd->rcv_err_status_cnt[44];
2345}
2346
2347static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2348 const struct cntr_entry *entry,
2349 void *context, int vl, int mode, u64 data)
2350{
2351 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352
2353 return dd->rcv_err_status_cnt[43];
2354}
2355
2356static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2357 const struct cntr_entry *entry,
2358 void *context, int vl, int mode, u64 data)
2359{
2360 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361
2362 return dd->rcv_err_status_cnt[42];
2363}
2364
2365static u64 access_rx_lookup_des_part2_parity_err_cnt(
2366 const struct cntr_entry *entry,
2367 void *context, int vl, int mode, u64 data)
2368{
2369 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370
2371 return dd->rcv_err_status_cnt[41];
2372}
2373
2374static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2375 const struct cntr_entry *entry,
2376 void *context, int vl, int mode, u64 data)
2377{
2378 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379
2380 return dd->rcv_err_status_cnt[40];
2381}
2382
2383static u64 access_rx_lookup_des_part1_unc_err_cnt(
2384 const struct cntr_entry *entry,
2385 void *context, int vl, int mode, u64 data)
2386{
2387 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388
2389 return dd->rcv_err_status_cnt[39];
2390}
2391
2392static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2393 const struct cntr_entry *entry,
2394 void *context, int vl, int mode, u64 data)
2395{
2396 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397
2398 return dd->rcv_err_status_cnt[38];
2399}
2400
2401static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2402 const struct cntr_entry *entry,
2403 void *context, int vl, int mode, u64 data)
2404{
2405 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406
2407 return dd->rcv_err_status_cnt[37];
2408}
2409
2410static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2411 const struct cntr_entry *entry,
2412 void *context, int vl, int mode, u64 data)
2413{
2414 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415
2416 return dd->rcv_err_status_cnt[36];
2417}
2418
2419static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2420 const struct cntr_entry *entry,
2421 void *context, int vl, int mode, u64 data)
2422{
2423 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424
2425 return dd->rcv_err_status_cnt[35];
2426}
2427
2428static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2429 const struct cntr_entry *entry,
2430 void *context, int vl, int mode, u64 data)
2431{
2432 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433
2434 return dd->rcv_err_status_cnt[34];
2435}
2436
2437static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2438 const struct cntr_entry *entry,
2439 void *context, int vl, int mode, u64 data)
2440{
2441 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442
2443 return dd->rcv_err_status_cnt[33];
2444}
2445
2446static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2447 void *context, int vl, int mode,
2448 u64 data)
2449{
2450 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451
2452 return dd->rcv_err_status_cnt[32];
2453}
2454
2455static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2456 void *context, int vl, int mode,
2457 u64 data)
2458{
2459 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460
2461 return dd->rcv_err_status_cnt[31];
2462}
2463
2464static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2465 void *context, int vl, int mode,
2466 u64 data)
2467{
2468 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469
2470 return dd->rcv_err_status_cnt[30];
2471}
2472
2473static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2474 void *context, int vl, int mode,
2475 u64 data)
2476{
2477 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478
2479 return dd->rcv_err_status_cnt[29];
2480}
2481
2482static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2483 void *context, int vl,
2484 int mode, u64 data)
2485{
2486 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487
2488 return dd->rcv_err_status_cnt[28];
2489}
2490
2491static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2492 const struct cntr_entry *entry,
2493 void *context, int vl, int mode, u64 data)
2494{
2495 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496
2497 return dd->rcv_err_status_cnt[27];
2498}
2499
2500static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2501 const struct cntr_entry *entry,
2502 void *context, int vl, int mode, u64 data)
2503{
2504 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505
2506 return dd->rcv_err_status_cnt[26];
2507}
2508
2509static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2510 const struct cntr_entry *entry,
2511 void *context, int vl, int mode, u64 data)
2512{
2513 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514
2515 return dd->rcv_err_status_cnt[25];
2516}
2517
2518static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2519 const struct cntr_entry *entry,
2520 void *context, int vl, int mode, u64 data)
2521{
2522 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523
2524 return dd->rcv_err_status_cnt[24];
2525}
2526
2527static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2528 const struct cntr_entry *entry,
2529 void *context, int vl, int mode, u64 data)
2530{
2531 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532
2533 return dd->rcv_err_status_cnt[23];
2534}
2535
2536static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2537 const struct cntr_entry *entry,
2538 void *context, int vl, int mode, u64 data)
2539{
2540 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541
2542 return dd->rcv_err_status_cnt[22];
2543}
2544
2545static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2546 const struct cntr_entry *entry,
2547 void *context, int vl, int mode, u64 data)
2548{
2549 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550
2551 return dd->rcv_err_status_cnt[21];
2552}
2553
2554static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2555 const struct cntr_entry *entry,
2556 void *context, int vl, int mode, u64 data)
2557{
2558 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559
2560 return dd->rcv_err_status_cnt[20];
2561}
2562
2563static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2564 const struct cntr_entry *entry,
2565 void *context, int vl, int mode, u64 data)
2566{
2567 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568
2569 return dd->rcv_err_status_cnt[19];
2570}
2571
2572static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2573 void *context, int vl,
2574 int mode, u64 data)
2575{
2576 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577
2578 return dd->rcv_err_status_cnt[18];
2579}
2580
2581static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2582 void *context, int vl,
2583 int mode, u64 data)
2584{
2585 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586
2587 return dd->rcv_err_status_cnt[17];
2588}
2589
2590static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2591 const struct cntr_entry *entry,
2592 void *context, int vl, int mode, u64 data)
2593{
2594 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595
2596 return dd->rcv_err_status_cnt[16];
2597}
2598
2599static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2600 const struct cntr_entry *entry,
2601 void *context, int vl, int mode, u64 data)
2602{
2603 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604
2605 return dd->rcv_err_status_cnt[15];
2606}
2607
2608static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2609 void *context, int vl,
2610 int mode, u64 data)
2611{
2612 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613
2614 return dd->rcv_err_status_cnt[14];
2615}
2616
2617static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2618 void *context, int vl,
2619 int mode, u64 data)
2620{
2621 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622
2623 return dd->rcv_err_status_cnt[13];
2624}
2625
2626static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2627 void *context, int vl, int mode,
2628 u64 data)
2629{
2630 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631
2632 return dd->rcv_err_status_cnt[12];
2633}
2634
2635static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2636 void *context, int vl, int mode,
2637 u64 data)
2638{
2639 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640
2641 return dd->rcv_err_status_cnt[11];
2642}
2643
2644static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2645 void *context, int vl, int mode,
2646 u64 data)
2647{
2648 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649
2650 return dd->rcv_err_status_cnt[10];
2651}
2652
2653static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2654 void *context, int vl, int mode,
2655 u64 data)
2656{
2657 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658
2659 return dd->rcv_err_status_cnt[9];
2660}
2661
2662static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2663 void *context, int vl, int mode,
2664 u64 data)
2665{
2666 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667
2668 return dd->rcv_err_status_cnt[8];
2669}
2670
2671static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2672 const struct cntr_entry *entry,
2673 void *context, int vl, int mode, u64 data)
2674{
2675 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676
2677 return dd->rcv_err_status_cnt[7];
2678}
2679
2680static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2681 const struct cntr_entry *entry,
2682 void *context, int vl, int mode, u64 data)
2683{
2684 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685
2686 return dd->rcv_err_status_cnt[6];
2687}
2688
2689static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2690 void *context, int vl, int mode,
2691 u64 data)
2692{
2693 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694
2695 return dd->rcv_err_status_cnt[5];
2696}
2697
2698static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2699 void *context, int vl, int mode,
2700 u64 data)
2701{
2702 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703
2704 return dd->rcv_err_status_cnt[4];
2705}
2706
2707static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2708 void *context, int vl, int mode,
2709 u64 data)
2710{
2711 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713 return dd->rcv_err_status_cnt[3];
2714}
2715
2716static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2717 void *context, int vl, int mode,
2718 u64 data)
2719{
2720 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722 return dd->rcv_err_status_cnt[2];
2723}
2724
2725static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2726 void *context, int vl, int mode,
2727 u64 data)
2728{
2729 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731 return dd->rcv_err_status_cnt[1];
2732}
2733
2734static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2735 void *context, int vl, int mode,
2736 u64 data)
2737{
2738 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740 return dd->rcv_err_status_cnt[0];
2741}
2742
2743/*
2744 * Software counters corresponding to each of the
2745 * error status bits within SendPioErrStatus
2746 */
2747static u64 access_pio_pec_sop_head_parity_err_cnt(
2748 const struct cntr_entry *entry,
2749 void *context, int vl, int mode, u64 data)
2750{
2751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2752
2753 return dd->send_pio_err_status_cnt[35];
2754}
2755
2756static u64 access_pio_pcc_sop_head_parity_err_cnt(
2757 const struct cntr_entry *entry,
2758 void *context, int vl, int mode, u64 data)
2759{
2760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2761
2762 return dd->send_pio_err_status_cnt[34];
2763}
2764
2765static u64 access_pio_last_returned_cnt_parity_err_cnt(
2766 const struct cntr_entry *entry,
2767 void *context, int vl, int mode, u64 data)
2768{
2769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2770
2771 return dd->send_pio_err_status_cnt[33];
2772}
2773
2774static u64 access_pio_current_free_cnt_parity_err_cnt(
2775 const struct cntr_entry *entry,
2776 void *context, int vl, int mode, u64 data)
2777{
2778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2779
2780 return dd->send_pio_err_status_cnt[32];
2781}
2782
2783static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2784 void *context, int vl, int mode,
2785 u64 data)
2786{
2787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2788
2789 return dd->send_pio_err_status_cnt[31];
2790}
2791
2792static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2793 void *context, int vl, int mode,
2794 u64 data)
2795{
2796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2797
2798 return dd->send_pio_err_status_cnt[30];
2799}
2800
2801static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2802 void *context, int vl, int mode,
2803 u64 data)
2804{
2805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2806
2807 return dd->send_pio_err_status_cnt[29];
2808}
2809
2810static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2811 const struct cntr_entry *entry,
2812 void *context, int vl, int mode, u64 data)
2813{
2814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815
2816 return dd->send_pio_err_status_cnt[28];
2817}
2818
2819static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2820 void *context, int vl, int mode,
2821 u64 data)
2822{
2823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824
2825 return dd->send_pio_err_status_cnt[27];
2826}
2827
2828static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2829 void *context, int vl, int mode,
2830 u64 data)
2831{
2832 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833
2834 return dd->send_pio_err_status_cnt[26];
2835}
2836
2837static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2838 void *context, int vl,
2839 int mode, u64 data)
2840{
2841 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842
2843 return dd->send_pio_err_status_cnt[25];
2844}
2845
2846static u64 access_pio_block_qw_count_parity_err_cnt(
2847 const struct cntr_entry *entry,
2848 void *context, int vl, int mode, u64 data)
2849{
2850 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851
2852 return dd->send_pio_err_status_cnt[24];
2853}
2854
2855static u64 access_pio_write_qw_valid_parity_err_cnt(
2856 const struct cntr_entry *entry,
2857 void *context, int vl, int mode, u64 data)
2858{
2859 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860
2861 return dd->send_pio_err_status_cnt[23];
2862}
2863
2864static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2865 void *context, int vl, int mode,
2866 u64 data)
2867{
2868 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869
2870 return dd->send_pio_err_status_cnt[22];
2871}
2872
2873static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2874 void *context, int vl,
2875 int mode, u64 data)
2876{
2877 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878
2879 return dd->send_pio_err_status_cnt[21];
2880}
2881
2882static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2883 void *context, int vl,
2884 int mode, u64 data)
2885{
2886 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887
2888 return dd->send_pio_err_status_cnt[20];
2889}
2890
2891static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2892 void *context, int vl,
2893 int mode, u64 data)
2894{
2895 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896
2897 return dd->send_pio_err_status_cnt[19];
2898}
2899
2900static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2901 const struct cntr_entry *entry,
2902 void *context, int vl, int mode, u64 data)
2903{
2904 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905
2906 return dd->send_pio_err_status_cnt[18];
2907}
2908
2909static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2910 void *context, int vl, int mode,
2911 u64 data)
2912{
2913 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914
2915 return dd->send_pio_err_status_cnt[17];
2916}
2917
2918static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2919 void *context, int vl, int mode,
2920 u64 data)
2921{
2922 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923
2924 return dd->send_pio_err_status_cnt[16];
2925}
2926
2927static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2928 const struct cntr_entry *entry,
2929 void *context, int vl, int mode, u64 data)
2930{
2931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932
2933 return dd->send_pio_err_status_cnt[15];
2934}
2935
2936static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2937 const struct cntr_entry *entry,
2938 void *context, int vl, int mode, u64 data)
2939{
2940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941
2942 return dd->send_pio_err_status_cnt[14];
2943}
2944
2945static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2946 const struct cntr_entry *entry,
2947 void *context, int vl, int mode, u64 data)
2948{
2949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950
2951 return dd->send_pio_err_status_cnt[13];
2952}
2953
2954static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2955 const struct cntr_entry *entry,
2956 void *context, int vl, int mode, u64 data)
2957{
2958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959
2960 return dd->send_pio_err_status_cnt[12];
2961}
2962
2963static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2964 const struct cntr_entry *entry,
2965 void *context, int vl, int mode, u64 data)
2966{
2967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968
2969 return dd->send_pio_err_status_cnt[11];
2970}
2971
2972static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2973 const struct cntr_entry *entry,
2974 void *context, int vl, int mode, u64 data)
2975{
2976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977
2978 return dd->send_pio_err_status_cnt[10];
2979}
2980
2981static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2982 const struct cntr_entry *entry,
2983 void *context, int vl, int mode, u64 data)
2984{
2985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986
2987 return dd->send_pio_err_status_cnt[9];
2988}
2989
2990static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2991 const struct cntr_entry *entry,
2992 void *context, int vl, int mode, u64 data)
2993{
2994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995
2996 return dd->send_pio_err_status_cnt[8];
2997}
2998
2999static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3000 const struct cntr_entry *entry,
3001 void *context, int vl, int mode, u64 data)
3002{
3003 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004
3005 return dd->send_pio_err_status_cnt[7];
3006}
3007
3008static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3009 void *context, int vl, int mode,
3010 u64 data)
3011{
3012 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013
3014 return dd->send_pio_err_status_cnt[6];
3015}
3016
3017static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3018 void *context, int vl, int mode,
3019 u64 data)
3020{
3021 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022
3023 return dd->send_pio_err_status_cnt[5];
3024}
3025
3026static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3027 void *context, int vl, int mode,
3028 u64 data)
3029{
3030 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031
3032 return dd->send_pio_err_status_cnt[4];
3033}
3034
3035static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3036 void *context, int vl, int mode,
3037 u64 data)
3038{
3039 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041 return dd->send_pio_err_status_cnt[3];
3042}
3043
3044static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3045 void *context, int vl, int mode,
3046 u64 data)
3047{
3048 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050 return dd->send_pio_err_status_cnt[2];
3051}
3052
3053static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3054 void *context, int vl,
3055 int mode, u64 data)
3056{
3057 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059 return dd->send_pio_err_status_cnt[1];
3060}
3061
3062static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3063 void *context, int vl, int mode,
3064 u64 data)
3065{
3066 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068 return dd->send_pio_err_status_cnt[0];
3069}
3070
3071/*
3072 * Software counters corresponding to each of the
3073 * error status bits within SendDmaErrStatus
3074 */
3075static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3076 const struct cntr_entry *entry,
3077 void *context, int vl, int mode, u64 data)
3078{
3079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080
3081 return dd->send_dma_err_status_cnt[3];
3082}
3083
3084static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3085 const struct cntr_entry *entry,
3086 void *context, int vl, int mode, u64 data)
3087{
3088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090 return dd->send_dma_err_status_cnt[2];
3091}
3092
3093static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3094 void *context, int vl, int mode,
3095 u64 data)
3096{
3097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099 return dd->send_dma_err_status_cnt[1];
3100}
3101
3102static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3103 void *context, int vl, int mode,
3104 u64 data)
3105{
3106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108 return dd->send_dma_err_status_cnt[0];
3109}
3110
3111/*
3112 * Software counters corresponding to each of the
3113 * error status bits within SendEgressErrStatus
3114 */
3115static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3116 const struct cntr_entry *entry,
3117 void *context, int vl, int mode, u64 data)
3118{
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121 return dd->send_egress_err_status_cnt[63];
3122}
3123
3124static u64 access_tx_read_sdma_memory_csr_err_cnt(
3125 const struct cntr_entry *entry,
3126 void *context, int vl, int mode, u64 data)
3127{
3128 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3129
3130 return dd->send_egress_err_status_cnt[62];
3131}
3132
3133static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3134 void *context, int vl, int mode,
3135 u64 data)
3136{
3137 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3138
3139 return dd->send_egress_err_status_cnt[61];
3140}
3141
3142static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3143 void *context, int vl,
3144 int mode, u64 data)
3145{
3146 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3147
3148 return dd->send_egress_err_status_cnt[60];
3149}
3150
3151static u64 access_tx_read_sdma_memory_cor_err_cnt(
3152 const struct cntr_entry *entry,
3153 void *context, int vl, int mode, u64 data)
3154{
3155 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3156
3157 return dd->send_egress_err_status_cnt[59];
3158}
3159
3160static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3161 void *context, int vl, int mode,
3162 u64 data)
3163{
3164 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3165
3166 return dd->send_egress_err_status_cnt[58];
3167}
3168
3169static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3170 void *context, int vl, int mode,
3171 u64 data)
3172{
3173 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3174
3175 return dd->send_egress_err_status_cnt[57];
3176}
3177
3178static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3179 void *context, int vl, int mode,
3180 u64 data)
3181{
3182 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183
3184 return dd->send_egress_err_status_cnt[56];
3185}
3186
3187static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3188 void *context, int vl, int mode,
3189 u64 data)
3190{
3191 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192
3193 return dd->send_egress_err_status_cnt[55];
3194}
3195
3196static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3197 void *context, int vl, int mode,
3198 u64 data)
3199{
3200 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201
3202 return dd->send_egress_err_status_cnt[54];
3203}
3204
3205static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3206 void *context, int vl, int mode,
3207 u64 data)
3208{
3209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210
3211 return dd->send_egress_err_status_cnt[53];
3212}
3213
3214static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3215 void *context, int vl, int mode,
3216 u64 data)
3217{
3218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219
3220 return dd->send_egress_err_status_cnt[52];
3221}
3222
3223static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3224 void *context, int vl, int mode,
3225 u64 data)
3226{
3227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228
3229 return dd->send_egress_err_status_cnt[51];
3230}
3231
3232static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3233 void *context, int vl, int mode,
3234 u64 data)
3235{
3236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237
3238 return dd->send_egress_err_status_cnt[50];
3239}
3240
3241static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3242 void *context, int vl, int mode,
3243 u64 data)
3244{
3245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246
3247 return dd->send_egress_err_status_cnt[49];
3248}
3249
3250static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3251 void *context, int vl, int mode,
3252 u64 data)
3253{
3254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255
3256 return dd->send_egress_err_status_cnt[48];
3257}
3258
3259static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3260 void *context, int vl, int mode,
3261 u64 data)
3262{
3263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264
3265 return dd->send_egress_err_status_cnt[47];
3266}
3267
3268static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3269 void *context, int vl, int mode,
3270 u64 data)
3271{
3272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273
3274 return dd->send_egress_err_status_cnt[46];
3275}
3276
3277static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3278 void *context, int vl, int mode,
3279 u64 data)
3280{
3281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282
3283 return dd->send_egress_err_status_cnt[45];
3284}
3285
3286static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3287 void *context, int vl,
3288 int mode, u64 data)
3289{
3290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291
3292 return dd->send_egress_err_status_cnt[44];
3293}
3294
3295static u64 access_tx_read_sdma_memory_unc_err_cnt(
3296 const struct cntr_entry *entry,
3297 void *context, int vl, int mode, u64 data)
3298{
3299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300
3301 return dd->send_egress_err_status_cnt[43];
3302}
3303
3304static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3305 void *context, int vl, int mode,
3306 u64 data)
3307{
3308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309
3310 return dd->send_egress_err_status_cnt[42];
3311}
3312
3313static u64 access_tx_credit_return_partiy_err_cnt(
3314 const struct cntr_entry *entry,
3315 void *context, int vl, int mode, u64 data)
3316{
3317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318
3319 return dd->send_egress_err_status_cnt[41];
3320}
3321
3322static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3323 const struct cntr_entry *entry,
3324 void *context, int vl, int mode, u64 data)
3325{
3326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327
3328 return dd->send_egress_err_status_cnt[40];
3329}
3330
3331static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3332 const struct cntr_entry *entry,
3333 void *context, int vl, int mode, u64 data)
3334{
3335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336
3337 return dd->send_egress_err_status_cnt[39];
3338}
3339
3340static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3341 const struct cntr_entry *entry,
3342 void *context, int vl, int mode, u64 data)
3343{
3344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345
3346 return dd->send_egress_err_status_cnt[38];
3347}
3348
3349static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3350 const struct cntr_entry *entry,
3351 void *context, int vl, int mode, u64 data)
3352{
3353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354
3355 return dd->send_egress_err_status_cnt[37];
3356}
3357
3358static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3359 const struct cntr_entry *entry,
3360 void *context, int vl, int mode, u64 data)
3361{
3362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363
3364 return dd->send_egress_err_status_cnt[36];
3365}
3366
3367static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3368 const struct cntr_entry *entry,
3369 void *context, int vl, int mode, u64 data)
3370{
3371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372
3373 return dd->send_egress_err_status_cnt[35];
3374}
3375
3376static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3377 const struct cntr_entry *entry,
3378 void *context, int vl, int mode, u64 data)
3379{
3380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381
3382 return dd->send_egress_err_status_cnt[34];
3383}
3384
3385static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3386 const struct cntr_entry *entry,
3387 void *context, int vl, int mode, u64 data)
3388{
3389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390
3391 return dd->send_egress_err_status_cnt[33];
3392}
3393
3394static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3395 const struct cntr_entry *entry,
3396 void *context, int vl, int mode, u64 data)
3397{
3398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399
3400 return dd->send_egress_err_status_cnt[32];
3401}
3402
3403static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3404 const struct cntr_entry *entry,
3405 void *context, int vl, int mode, u64 data)
3406{
3407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408
3409 return dd->send_egress_err_status_cnt[31];
3410}
3411
3412static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3413 const struct cntr_entry *entry,
3414 void *context, int vl, int mode, u64 data)
3415{
3416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417
3418 return dd->send_egress_err_status_cnt[30];
3419}
3420
3421static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3422 const struct cntr_entry *entry,
3423 void *context, int vl, int mode, u64 data)
3424{
3425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426
3427 return dd->send_egress_err_status_cnt[29];
3428}
3429
3430static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3431 const struct cntr_entry *entry,
3432 void *context, int vl, int mode, u64 data)
3433{
3434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435
3436 return dd->send_egress_err_status_cnt[28];
3437}
3438
3439static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3440 const struct cntr_entry *entry,
3441 void *context, int vl, int mode, u64 data)
3442{
3443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444
3445 return dd->send_egress_err_status_cnt[27];
3446}
3447
3448static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3449 const struct cntr_entry *entry,
3450 void *context, int vl, int mode, u64 data)
3451{
3452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453
3454 return dd->send_egress_err_status_cnt[26];
3455}
3456
3457static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3458 const struct cntr_entry *entry,
3459 void *context, int vl, int mode, u64 data)
3460{
3461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462
3463 return dd->send_egress_err_status_cnt[25];
3464}
3465
3466static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3467 const struct cntr_entry *entry,
3468 void *context, int vl, int mode, u64 data)
3469{
3470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471
3472 return dd->send_egress_err_status_cnt[24];
3473}
3474
3475static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3476 const struct cntr_entry *entry,
3477 void *context, int vl, int mode, u64 data)
3478{
3479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480
3481 return dd->send_egress_err_status_cnt[23];
3482}
3483
3484static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3485 const struct cntr_entry *entry,
3486 void *context, int vl, int mode, u64 data)
3487{
3488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489
3490 return dd->send_egress_err_status_cnt[22];
3491}
3492
3493static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3494 const struct cntr_entry *entry,
3495 void *context, int vl, int mode, u64 data)
3496{
3497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498
3499 return dd->send_egress_err_status_cnt[21];
3500}
3501
3502static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3503 const struct cntr_entry *entry,
3504 void *context, int vl, int mode, u64 data)
3505{
3506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507
3508 return dd->send_egress_err_status_cnt[20];
3509}
3510
3511static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3512 const struct cntr_entry *entry,
3513 void *context, int vl, int mode, u64 data)
3514{
3515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516
3517 return dd->send_egress_err_status_cnt[19];
3518}
3519
3520static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3521 const struct cntr_entry *entry,
3522 void *context, int vl, int mode, u64 data)
3523{
3524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525
3526 return dd->send_egress_err_status_cnt[18];
3527}
3528
3529static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3530 const struct cntr_entry *entry,
3531 void *context, int vl, int mode, u64 data)
3532{
3533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534
3535 return dd->send_egress_err_status_cnt[17];
3536}
3537
3538static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3539 const struct cntr_entry *entry,
3540 void *context, int vl, int mode, u64 data)
3541{
3542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543
3544 return dd->send_egress_err_status_cnt[16];
3545}
3546
3547static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3548 void *context, int vl, int mode,
3549 u64 data)
3550{
3551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3552
3553 return dd->send_egress_err_status_cnt[15];
3554}
3555
3556static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3557 void *context, int vl,
3558 int mode, u64 data)
3559{
3560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3561
3562 return dd->send_egress_err_status_cnt[14];
3563}
3564
3565static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3566 void *context, int vl, int mode,
3567 u64 data)
3568{
3569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3570
3571 return dd->send_egress_err_status_cnt[13];
3572}
3573
3574static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3575 void *context, int vl, int mode,
3576 u64 data)
3577{
3578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3579
3580 return dd->send_egress_err_status_cnt[12];
3581}
3582
3583static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3584 const struct cntr_entry *entry,
3585 void *context, int vl, int mode, u64 data)
3586{
3587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3588
3589 return dd->send_egress_err_status_cnt[11];
3590}
3591
3592static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3593 void *context, int vl, int mode,
3594 u64 data)
3595{
3596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3597
3598 return dd->send_egress_err_status_cnt[10];
3599}
3600
3601static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3602 void *context, int vl, int mode,
3603 u64 data)
3604{
3605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3606
3607 return dd->send_egress_err_status_cnt[9];
3608}
3609
3610static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3611 const struct cntr_entry *entry,
3612 void *context, int vl, int mode, u64 data)
3613{
3614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3615
3616 return dd->send_egress_err_status_cnt[8];
3617}
3618
3619static u64 access_tx_pio_launch_intf_parity_err_cnt(
3620 const struct cntr_entry *entry,
3621 void *context, int vl, int mode, u64 data)
3622{
3623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3624
3625 return dd->send_egress_err_status_cnt[7];
3626}
3627
3628static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3629 void *context, int vl, int mode,
3630 u64 data)
3631{
3632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3633
3634 return dd->send_egress_err_status_cnt[6];
3635}
3636
3637static u64 access_tx_incorrect_link_state_err_cnt(
3638 const struct cntr_entry *entry,
3639 void *context, int vl, int mode, u64 data)
3640{
3641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3642
3643 return dd->send_egress_err_status_cnt[5];
3644}
3645
3646static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3647 void *context, int vl, int mode,
3648 u64 data)
3649{
3650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3651
3652 return dd->send_egress_err_status_cnt[4];
3653}
3654
3655static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3656 const struct cntr_entry *entry,
3657 void *context, int vl, int mode, u64 data)
3658{
3659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661 return dd->send_egress_err_status_cnt[3];
3662}
3663
3664static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3665 void *context, int vl, int mode,
3666 u64 data)
3667{
3668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670 return dd->send_egress_err_status_cnt[2];
3671}
3672
3673static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3674 const struct cntr_entry *entry,
3675 void *context, int vl, int mode, u64 data)
3676{
3677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679 return dd->send_egress_err_status_cnt[1];
3680}
3681
3682static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3683 const struct cntr_entry *entry,
3684 void *context, int vl, int mode, u64 data)
3685{
3686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3687
3688 return dd->send_egress_err_status_cnt[0];
3689}
3690
3691/*
3692 * Software counters corresponding to each of the
3693 * error status bits within SendErrStatus
3694 */
3695static u64 access_send_csr_write_bad_addr_err_cnt(
3696 const struct cntr_entry *entry,
3697 void *context, int vl, int mode, u64 data)
3698{
3699 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701 return dd->send_err_status_cnt[2];
3702}
3703
3704static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3705 void *context, int vl,
3706 int mode, u64 data)
3707{
3708 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710 return dd->send_err_status_cnt[1];
3711}
3712
3713static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3714 void *context, int vl, int mode,
3715 u64 data)
3716{
3717 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719 return dd->send_err_status_cnt[0];
3720}
3721
3722/*
3723 * Software counters corresponding to each of the
3724 * error status bits within SendCtxtErrStatus
3725 */
3726static u64 access_pio_write_out_of_bounds_err_cnt(
3727 const struct cntr_entry *entry,
3728 void *context, int vl, int mode, u64 data)
3729{
3730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732 return dd->sw_ctxt_err_status_cnt[4];
3733}
3734
3735static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3736 void *context, int vl, int mode,
3737 u64 data)
3738{
3739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741 return dd->sw_ctxt_err_status_cnt[3];
3742}
3743
3744static u64 access_pio_write_crosses_boundary_err_cnt(
3745 const struct cntr_entry *entry,
3746 void *context, int vl, int mode, u64 data)
3747{
3748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750 return dd->sw_ctxt_err_status_cnt[2];
3751}
3752
3753static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3754 void *context, int vl,
3755 int mode, u64 data)
3756{
3757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759 return dd->sw_ctxt_err_status_cnt[1];
3760}
3761
3762static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3763 void *context, int vl, int mode,
3764 u64 data)
3765{
3766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768 return dd->sw_ctxt_err_status_cnt[0];
3769}
3770
3771/*
3772 * Software counters corresponding to each of the
3773 * error status bits within SendDmaEngErrStatus
3774 */
3775static u64 access_sdma_header_request_fifo_cor_err_cnt(
3776 const struct cntr_entry *entry,
3777 void *context, int vl, int mode, u64 data)
3778{
3779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3780
3781 return dd->sw_send_dma_eng_err_status_cnt[23];
3782}
3783
3784static u64 access_sdma_header_storage_cor_err_cnt(
3785 const struct cntr_entry *entry,
3786 void *context, int vl, int mode, u64 data)
3787{
3788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3789
3790 return dd->sw_send_dma_eng_err_status_cnt[22];
3791}
3792
3793static u64 access_sdma_packet_tracking_cor_err_cnt(
3794 const struct cntr_entry *entry,
3795 void *context, int vl, int mode, u64 data)
3796{
3797 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3798
3799 return dd->sw_send_dma_eng_err_status_cnt[21];
3800}
3801
3802static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3803 void *context, int vl, int mode,
3804 u64 data)
3805{
3806 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3807
3808 return dd->sw_send_dma_eng_err_status_cnt[20];
3809}
3810
3811static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3812 void *context, int vl, int mode,
3813 u64 data)
3814{
3815 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3816
3817 return dd->sw_send_dma_eng_err_status_cnt[19];
3818}
3819
3820static u64 access_sdma_header_request_fifo_unc_err_cnt(
3821 const struct cntr_entry *entry,
3822 void *context, int vl, int mode, u64 data)
3823{
3824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3825
3826 return dd->sw_send_dma_eng_err_status_cnt[18];
3827}
3828
3829static u64 access_sdma_header_storage_unc_err_cnt(
3830 const struct cntr_entry *entry,
3831 void *context, int vl, int mode, u64 data)
3832{
3833 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3834
3835 return dd->sw_send_dma_eng_err_status_cnt[17];
3836}
3837
3838static u64 access_sdma_packet_tracking_unc_err_cnt(
3839 const struct cntr_entry *entry,
3840 void *context, int vl, int mode, u64 data)
3841{
3842 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3843
3844 return dd->sw_send_dma_eng_err_status_cnt[16];
3845}
3846
3847static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3848 void *context, int vl, int mode,
3849 u64 data)
3850{
3851 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3852
3853 return dd->sw_send_dma_eng_err_status_cnt[15];
3854}
3855
3856static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3857 void *context, int vl, int mode,
3858 u64 data)
3859{
3860 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3861
3862 return dd->sw_send_dma_eng_err_status_cnt[14];
3863}
3864
3865static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3866 void *context, int vl, int mode,
3867 u64 data)
3868{
3869 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3870
3871 return dd->sw_send_dma_eng_err_status_cnt[13];
3872}
3873
3874static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3875 void *context, int vl, int mode,
3876 u64 data)
3877{
3878 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3879
3880 return dd->sw_send_dma_eng_err_status_cnt[12];
3881}
3882
3883static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3884 void *context, int vl, int mode,
3885 u64 data)
3886{
3887 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3888
3889 return dd->sw_send_dma_eng_err_status_cnt[11];
3890}
3891
3892static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3893 void *context, int vl, int mode,
3894 u64 data)
3895{
3896 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3897
3898 return dd->sw_send_dma_eng_err_status_cnt[10];
3899}
3900
3901static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3902 void *context, int vl, int mode,
3903 u64 data)
3904{
3905 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3906
3907 return dd->sw_send_dma_eng_err_status_cnt[9];
3908}
3909
3910static u64 access_sdma_packet_desc_overflow_err_cnt(
3911 const struct cntr_entry *entry,
3912 void *context, int vl, int mode, u64 data)
3913{
3914 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3915
3916 return dd->sw_send_dma_eng_err_status_cnt[8];
3917}
3918
3919static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3920 void *context, int vl,
3921 int mode, u64 data)
3922{
3923 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3924
3925 return dd->sw_send_dma_eng_err_status_cnt[7];
3926}
3927
3928static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3929 void *context, int vl, int mode, u64 data)
3930{
3931 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933 return dd->sw_send_dma_eng_err_status_cnt[6];
3934}
3935
3936static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3937 void *context, int vl, int mode,
3938 u64 data)
3939{
3940 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942 return dd->sw_send_dma_eng_err_status_cnt[5];
3943}
3944
3945static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3946 void *context, int vl, int mode,
3947 u64 data)
3948{
3949 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951 return dd->sw_send_dma_eng_err_status_cnt[4];
3952}
3953
3954static u64 access_sdma_tail_out_of_bounds_err_cnt(
3955 const struct cntr_entry *entry,
3956 void *context, int vl, int mode, u64 data)
3957{
3958 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960 return dd->sw_send_dma_eng_err_status_cnt[3];
3961}
3962
3963static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3964 void *context, int vl, int mode,
3965 u64 data)
3966{
3967 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969 return dd->sw_send_dma_eng_err_status_cnt[2];
3970}
3971
3972static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3973 void *context, int vl, int mode,
3974 u64 data)
3975{
3976 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978 return dd->sw_send_dma_eng_err_status_cnt[1];
3979}
3980
3981static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3982 void *context, int vl, int mode,
3983 u64 data)
3984{
3985 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3986
3987 return dd->sw_send_dma_eng_err_status_cnt[0];
3988}
3989
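/*
 * Added commentary on the accessor below: the DC receive error counter is
 * read or written through read_write_csr().  On a read, the software count
 * of bypass packet errors is folded into the hardware value, saturating at
 * CNTR_MAX; on a write, that software count is cleared.
 */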
3990static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
3991 void *context, int vl, int mode,
3992 u64 data)
3993{
3994 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3995
3996 u64 val = 0;
3997 u64 csr = entry->csr;
3998
3999 val = read_write_csr(dd, csr, mode, data);
4000 if (mode == CNTR_MODE_R) {
4001 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4002 CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4003 } else if (mode == CNTR_MODE_W) {
4004 dd->sw_rcv_bypass_packet_errors = 0;
4005 } else {
4006 dd_dev_err(dd, "Invalid cntr register access mode");
4007 return 0;
4008 }
4009 return val;
4010}
4011
4012#define def_access_sw_cpu(cntr) \
4013static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
4014 void *context, int vl, int mode, u64 data) \
4015{ \
4016 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4017	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
4018 ppd->ibport_data.rvp.cntr, vl, \
4019			      mode, data); \
4020}
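/*
 * Rough expansion sketch (commentary only): def_access_sw_cpu(rc_acks)
 * produces
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 *
 * i.e. a per-port accessor over a per-CPU counter, with the z_* field
 * apparently serving as the reset baseline.
 */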
4021
4022def_access_sw_cpu(rc_acks);
4023def_access_sw_cpu(rc_qacks);
4024def_access_sw_cpu(rc_delayed_comp);
4025
4026#define def_access_ibp_counter(cntr) \
4027static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
4028 void *context, int vl, int mode, u64 data) \
4029{ \
4030 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
4031 \
4032 if (vl != CNTR_INVALID_VL) \
4033 return 0; \
4034 \
4035	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
4036			     mode, data); \
4037}
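/*
 * Similarly (commentary only), def_access_ibp_counter(rc_resends) produces
 * access_ibp_rc_resends(), which returns 0 whenever a specific VL is
 * requested (these IB-protocol counters are kept per port, not per VL) and
 * otherwise hands &ppd->ibport_data.rvp.n_rc_resends to read_write_sw().
 */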
4038
4039def_access_ibp_counter(loop_pkts);
4040def_access_ibp_counter(rc_resends);
4041def_access_ibp_counter(rnr_naks);
4042def_access_ibp_counter(other_naks);
4043def_access_ibp_counter(rc_timeouts);
4044def_access_ibp_counter(pkt_drops);
4045def_access_ibp_counter(dmawait);
4046def_access_ibp_counter(rc_seqnak);
4047def_access_ibp_counter(rc_dupreq);
4048def_access_ibp_counter(rdma_seq);
4049def_access_ibp_counter(unaligned);
4050def_access_ibp_counter(seq_naks);
4051
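/*
 * Added commentary: dev_cntrs[] maps each device counter index to its
 * descriptor.  Judging from the initializers, the CNTR_ELEM()-style macros
 * bundle a display name, a CSR address (0 for software-only counters), an
 * offset, the CNTR_* mode flags, and the read/write accessor; the exact
 * field layout is given by struct cntr_entry elsewhere in the driver.
 */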
4052static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4053[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4054[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4055 CNTR_NORMAL),
4056[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4057 CNTR_NORMAL),
4058[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4059 RCV_TID_FLOW_GEN_MISMATCH_CNT,
4060 CNTR_NORMAL),
4061[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4062 CNTR_NORMAL),
4063[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4064 RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4065[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4066 CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4067[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4068 CNTR_NORMAL),
4069[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4070 CNTR_NORMAL),
4071[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4072 CNTR_NORMAL),
4073[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4074 CNTR_NORMAL),
4075[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4076 CNTR_NORMAL),
4077[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4078 CNTR_NORMAL),
4079[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4080 CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4081[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4082 CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4083[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4084 CNTR_SYNTH),
4085[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4086 access_dc_rcv_err_cnt),
4087[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4088 CNTR_SYNTH),
4089[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4090 CNTR_SYNTH),
4091[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4092 CNTR_SYNTH),
4093[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4094 DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4095[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4096 DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4097 CNTR_SYNTH),
4098[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4099 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4100[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4101 CNTR_SYNTH),
4102[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4103 CNTR_SYNTH),
4104[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4105 CNTR_SYNTH),
4106[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4107 CNTR_SYNTH),
4108[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4109 CNTR_SYNTH),
4110[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4111 CNTR_SYNTH),
4112[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4113 CNTR_SYNTH),
4114[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4115 CNTR_SYNTH | CNTR_VL),
4116[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4117 CNTR_SYNTH | CNTR_VL),
4118[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4119[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4120 CNTR_SYNTH | CNTR_VL),
4121[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4122[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4123 CNTR_SYNTH | CNTR_VL),
4124[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4125 CNTR_SYNTH),
4126[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4127 CNTR_SYNTH | CNTR_VL),
4128[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4129 CNTR_SYNTH),
4130[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4131 CNTR_SYNTH | CNTR_VL),
4132[C_DC_TOTAL_CRC] =
4133 DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4134 CNTR_SYNTH),
4135[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4136 CNTR_SYNTH),
4137[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4138 CNTR_SYNTH),
4139[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4140 CNTR_SYNTH),
4141[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4142 CNTR_SYNTH),
4143[C_DC_CRC_MULT_LN] =
4144 DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4145 CNTR_SYNTH),
4146[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4147 CNTR_SYNTH),
4148[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4149 CNTR_SYNTH),
4150[C_DC_SEQ_CRC_CNT] =
4151 DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4152 CNTR_SYNTH),
4153[C_DC_ESC0_ONLY_CNT] =
4154 DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4155 CNTR_SYNTH),
4156[C_DC_ESC0_PLUS1_CNT] =
4157 DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4158 CNTR_SYNTH),
4159[C_DC_ESC0_PLUS2_CNT] =
4160 DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4161 CNTR_SYNTH),
4162[C_DC_REINIT_FROM_PEER_CNT] =
4163 DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4164 CNTR_SYNTH),
4165[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4166 CNTR_SYNTH),
4167[C_DC_MISC_FLG_CNT] =
4168 DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4169 CNTR_SYNTH),
4170[C_DC_PRF_GOOD_LTP_CNT] =
4171 DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4172[C_DC_PRF_ACCEPTED_LTP_CNT] =
4173 DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4174 CNTR_SYNTH),
4175[C_DC_PRF_RX_FLIT_CNT] =
4176 DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4177[C_DC_PRF_TX_FLIT_CNT] =
4178 DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4179[C_DC_PRF_CLK_CNTR] =
4180 DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4181[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4182 DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4183[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4184 DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4185 CNTR_SYNTH),
4186[C_DC_PG_STS_TX_SBE_CNT] =
4187 DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4188[C_DC_PG_STS_TX_MBE_CNT] =
4189 DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4190 CNTR_SYNTH),
4191[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4192 access_sw_cpu_intr),
4193[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4194 access_sw_cpu_rcv_limit),
4195[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4196 access_sw_vtx_wait),
4197[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4198 access_sw_pio_wait),
4199[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4200 access_sw_pio_drain),
4201[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4202 access_sw_kmem_wait),
4203[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4204 access_sw_send_schedule),
4205[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4206 SEND_DMA_DESC_FETCHED_CNT, 0,
4207 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4208 dev_access_u32_csr),
4209[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4210 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4211 access_sde_int_cnt),
4212[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4213 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4214 access_sde_err_cnt),
4215[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4216 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4217 access_sde_idle_int_cnt),
4218[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4219 CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4220 access_sde_progress_int_cnt),
4221/* MISC_ERR_STATUS */
4222[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4223 CNTR_NORMAL,
4224 access_misc_pll_lock_fail_err_cnt),
4225[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4226 CNTR_NORMAL,
4227 access_misc_mbist_fail_err_cnt),
4228[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4229 CNTR_NORMAL,
4230 access_misc_invalid_eep_cmd_err_cnt),
4231[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4232 CNTR_NORMAL,
4233 access_misc_efuse_done_parity_err_cnt),
4234[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4235 CNTR_NORMAL,
4236 access_misc_efuse_write_err_cnt),
4237[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4238 0, CNTR_NORMAL,
4239 access_misc_efuse_read_bad_addr_err_cnt),
4240[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4241 CNTR_NORMAL,
4242 access_misc_efuse_csr_parity_err_cnt),
4243[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4244 CNTR_NORMAL,
4245 access_misc_fw_auth_failed_err_cnt),
4246[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4247 CNTR_NORMAL,
4248 access_misc_key_mismatch_err_cnt),
4249[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4250 CNTR_NORMAL,
4251 access_misc_sbus_write_failed_err_cnt),
4252[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4253 CNTR_NORMAL,
4254 access_misc_csr_write_bad_addr_err_cnt),
4255[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4256 CNTR_NORMAL,
4257 access_misc_csr_read_bad_addr_err_cnt),
4258[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4259 CNTR_NORMAL,
4260 access_misc_csr_parity_err_cnt),
4261/* CceErrStatus */
4262[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4263 CNTR_NORMAL,
4264 access_sw_cce_err_status_aggregated_cnt),
4265[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4266 CNTR_NORMAL,
4267 access_cce_msix_csr_parity_err_cnt),
4268[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4269 CNTR_NORMAL,
4270 access_cce_int_map_unc_err_cnt),
4271[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4272 CNTR_NORMAL,
4273 access_cce_int_map_cor_err_cnt),
4274[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4275 CNTR_NORMAL,
4276 access_cce_msix_table_unc_err_cnt),
4277[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4278 CNTR_NORMAL,
4279 access_cce_msix_table_cor_err_cnt),
4280[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4281 0, CNTR_NORMAL,
4282 access_cce_rxdma_conv_fifo_parity_err_cnt),
4283[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4284 0, CNTR_NORMAL,
4285 access_cce_rcpl_async_fifo_parity_err_cnt),
4286[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4287 CNTR_NORMAL,
4288 access_cce_seg_write_bad_addr_err_cnt),
4289[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4290 CNTR_NORMAL,
4291 access_cce_seg_read_bad_addr_err_cnt),
4292[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4293 CNTR_NORMAL,
4294 access_la_triggered_cnt),
4295[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4296 CNTR_NORMAL,
4297 access_cce_trgt_cpl_timeout_err_cnt),
4298[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4299 CNTR_NORMAL,
4300 access_pcic_receive_parity_err_cnt),
4301[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4302 CNTR_NORMAL,
4303 access_pcic_transmit_back_parity_err_cnt),
4304[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4305 0, CNTR_NORMAL,
4306 access_pcic_transmit_front_parity_err_cnt),
4307[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4308 CNTR_NORMAL,
4309 access_pcic_cpl_dat_q_unc_err_cnt),
4310[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4311 CNTR_NORMAL,
4312 access_pcic_cpl_hd_q_unc_err_cnt),
4313[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4314 CNTR_NORMAL,
4315 access_pcic_post_dat_q_unc_err_cnt),
4316[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4317 CNTR_NORMAL,
4318 access_pcic_post_hd_q_unc_err_cnt),
4319[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4320 CNTR_NORMAL,
4321 access_pcic_retry_sot_mem_unc_err_cnt),
4322[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4323 CNTR_NORMAL,
4324 access_pcic_retry_mem_unc_err),
4325[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4326 CNTR_NORMAL,
4327 access_pcic_n_post_dat_q_parity_err_cnt),
4328[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4329 CNTR_NORMAL,
4330 access_pcic_n_post_h_q_parity_err_cnt),
4331[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4332 CNTR_NORMAL,
4333 access_pcic_cpl_dat_q_cor_err_cnt),
4334[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4335 CNTR_NORMAL,
4336 access_pcic_cpl_hd_q_cor_err_cnt),
4337[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4338 CNTR_NORMAL,
4339 access_pcic_post_dat_q_cor_err_cnt),
4340[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4341 CNTR_NORMAL,
4342 access_pcic_post_hd_q_cor_err_cnt),
4343[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4344 CNTR_NORMAL,
4345 access_pcic_retry_sot_mem_cor_err_cnt),
4346[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4347 CNTR_NORMAL,
4348 access_pcic_retry_mem_cor_err_cnt),
4349[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4350 "CceCli1AsyncFifoDbgParityError", 0, 0,
4351 CNTR_NORMAL,
4352 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4353[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4354 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4355 CNTR_NORMAL,
4356 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4357 ),
4358[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4359 "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4360 CNTR_NORMAL,
4361 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4362[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4363 "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4364 CNTR_NORMAL,
4365 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4366[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4367 0, CNTR_NORMAL,
4368 access_cce_cli2_async_fifo_parity_err_cnt),
4369[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4370 CNTR_NORMAL,
4371 access_cce_csr_cfg_bus_parity_err_cnt),
4372[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4373 0, CNTR_NORMAL,
4374 access_cce_cli0_async_fifo_parity_err_cnt),
4375[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4376 CNTR_NORMAL,
4377 access_cce_rspd_data_parity_err_cnt),
4378[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4379 CNTR_NORMAL,
4380 access_cce_trgt_access_err_cnt),
4381[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4382 0, CNTR_NORMAL,
4383 access_cce_trgt_async_fifo_parity_err_cnt),
4384[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4385 CNTR_NORMAL,
4386 access_cce_csr_write_bad_addr_err_cnt),
4387[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4388 CNTR_NORMAL,
4389 access_cce_csr_read_bad_addr_err_cnt),
4390[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4391 CNTR_NORMAL,
4392 access_ccs_csr_parity_err_cnt),
4393
4394/* RcvErrStatus */
4395[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4396 CNTR_NORMAL,
4397 access_rx_csr_parity_err_cnt),
4398[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4399 CNTR_NORMAL,
4400 access_rx_csr_write_bad_addr_err_cnt),
4401[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4402 CNTR_NORMAL,
4403 access_rx_csr_read_bad_addr_err_cnt),
4404[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4405 CNTR_NORMAL,
4406 access_rx_dma_csr_unc_err_cnt),
4407[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4408 CNTR_NORMAL,
4409 access_rx_dma_dq_fsm_encoding_err_cnt),
4410[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4411 CNTR_NORMAL,
4412 access_rx_dma_eq_fsm_encoding_err_cnt),
4413[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4414 CNTR_NORMAL,
4415 access_rx_dma_csr_parity_err_cnt),
4416[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4417 CNTR_NORMAL,
4418 access_rx_rbuf_data_cor_err_cnt),
4419[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4420 CNTR_NORMAL,
4421 access_rx_rbuf_data_unc_err_cnt),
4422[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4423 CNTR_NORMAL,
4424 access_rx_dma_data_fifo_rd_cor_err_cnt),
4425[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4426 CNTR_NORMAL,
4427 access_rx_dma_data_fifo_rd_unc_err_cnt),
4428[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4429 CNTR_NORMAL,
4430 access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4431[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4432 CNTR_NORMAL,
4433 access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4434[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4435 CNTR_NORMAL,
4436 access_rx_rbuf_desc_part2_cor_err_cnt),
4437[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4438 CNTR_NORMAL,
4439 access_rx_rbuf_desc_part2_unc_err_cnt),
4440[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4441 CNTR_NORMAL,
4442 access_rx_rbuf_desc_part1_cor_err_cnt),
4443[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4444 CNTR_NORMAL,
4445 access_rx_rbuf_desc_part1_unc_err_cnt),
4446[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4447 CNTR_NORMAL,
4448 access_rx_hq_intr_fsm_err_cnt),
4449[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4450 CNTR_NORMAL,
4451 access_rx_hq_intr_csr_parity_err_cnt),
4452[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4453 CNTR_NORMAL,
4454 access_rx_lookup_csr_parity_err_cnt),
4455[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4456 CNTR_NORMAL,
4457 access_rx_lookup_rcv_array_cor_err_cnt),
4458[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4459 CNTR_NORMAL,
4460 access_rx_lookup_rcv_array_unc_err_cnt),
4461[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4462 0, CNTR_NORMAL,
4463 access_rx_lookup_des_part2_parity_err_cnt),
4464[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4465 0, CNTR_NORMAL,
4466 access_rx_lookup_des_part1_unc_cor_err_cnt),
4467[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4468 CNTR_NORMAL,
4469 access_rx_lookup_des_part1_unc_err_cnt),
4470[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4471 CNTR_NORMAL,
4472 access_rx_rbuf_next_free_buf_cor_err_cnt),
4473[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4474 CNTR_NORMAL,
4475 access_rx_rbuf_next_free_buf_unc_err_cnt),
4476[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4477 "RxRbufFlInitWrAddrParityErr", 0, 0,
4478 CNTR_NORMAL,
4479 access_rbuf_fl_init_wr_addr_parity_err_cnt),
4480[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4481 0, CNTR_NORMAL,
4482 access_rx_rbuf_fl_initdone_parity_err_cnt),
4483[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4484 0, CNTR_NORMAL,
4485 access_rx_rbuf_fl_write_addr_parity_err_cnt),
4486[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4487 CNTR_NORMAL,
4488 access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4489[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4490 CNTR_NORMAL,
4491 access_rx_rbuf_empty_err_cnt),
4492[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4493 CNTR_NORMAL,
4494 access_rx_rbuf_full_err_cnt),
4495[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4496 CNTR_NORMAL,
4497 access_rbuf_bad_lookup_err_cnt),
4498[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4499 CNTR_NORMAL,
4500 access_rbuf_ctx_id_parity_err_cnt),
4501[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4502 CNTR_NORMAL,
4503 access_rbuf_csr_qeopdw_parity_err_cnt),
4504[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4505 "RxRbufCsrQNumOfPktParityErr", 0, 0,
4506 CNTR_NORMAL,
4507 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4508[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4509 "RxRbufCsrQTlPtrParityErr", 0, 0,
4510 CNTR_NORMAL,
4511 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4512[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4513 0, CNTR_NORMAL,
4514 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4515[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4516 0, CNTR_NORMAL,
4517 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4518[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4519 0, 0, CNTR_NORMAL,
4520 access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4521[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4522 0, CNTR_NORMAL,
4523 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4524[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4525 "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4526 CNTR_NORMAL,
4527 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4528[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4529 0, CNTR_NORMAL,
4530 access_rx_rbuf_block_list_read_cor_err_cnt),
4531[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4532 0, CNTR_NORMAL,
4533 access_rx_rbuf_block_list_read_unc_err_cnt),
4534[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4535 CNTR_NORMAL,
4536 access_rx_rbuf_lookup_des_cor_err_cnt),
4537[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4538 CNTR_NORMAL,
4539 access_rx_rbuf_lookup_des_unc_err_cnt),
4540[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4541 "RxRbufLookupDesRegUncCorErr", 0, 0,
4542 CNTR_NORMAL,
4543 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4544[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4545 CNTR_NORMAL,
4546 access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4547[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4548 CNTR_NORMAL,
4549 access_rx_rbuf_free_list_cor_err_cnt),
4550[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4551 CNTR_NORMAL,
4552 access_rx_rbuf_free_list_unc_err_cnt),
4553[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4554 CNTR_NORMAL,
4555 access_rx_rcv_fsm_encoding_err_cnt),
4556[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4557 CNTR_NORMAL,
4558 access_rx_dma_flag_cor_err_cnt),
4559[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4560 CNTR_NORMAL,
4561 access_rx_dma_flag_unc_err_cnt),
4562[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4563 CNTR_NORMAL,
4564 access_rx_dc_sop_eop_parity_err_cnt),
4565[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4566 CNTR_NORMAL,
4567 access_rx_rcv_csr_parity_err_cnt),
4568[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4569 CNTR_NORMAL,
4570 access_rx_rcv_qp_map_table_cor_err_cnt),
4571[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4572 CNTR_NORMAL,
4573 access_rx_rcv_qp_map_table_unc_err_cnt),
4574[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4575 CNTR_NORMAL,
4576 access_rx_rcv_data_cor_err_cnt),
4577[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4578 CNTR_NORMAL,
4579 access_rx_rcv_data_unc_err_cnt),
4580[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4581 CNTR_NORMAL,
4582 access_rx_rcv_hdr_cor_err_cnt),
4583[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4584 CNTR_NORMAL,
4585 access_rx_rcv_hdr_unc_err_cnt),
4586[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4587 CNTR_NORMAL,
4588 access_rx_dc_intf_parity_err_cnt),
4589[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4590 CNTR_NORMAL,
4591 access_rx_dma_csr_cor_err_cnt),
4592/* SendPioErrStatus */
4593[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4594 CNTR_NORMAL,
4595 access_pio_pec_sop_head_parity_err_cnt),
4596[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4597 CNTR_NORMAL,
4598 access_pio_pcc_sop_head_parity_err_cnt),
4599[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4600 0, 0, CNTR_NORMAL,
4601 access_pio_last_returned_cnt_parity_err_cnt),
4602[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4603 0, CNTR_NORMAL,
4604 access_pio_current_free_cnt_parity_err_cnt),
4605[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4606 CNTR_NORMAL,
4607 access_pio_reserved_31_err_cnt),
4608[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4609 CNTR_NORMAL,
4610 access_pio_reserved_30_err_cnt),
4611[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4612 CNTR_NORMAL,
4613 access_pio_ppmc_sop_len_err_cnt),
4614[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4615 CNTR_NORMAL,
4616 access_pio_ppmc_bqc_mem_parity_err_cnt),
4617[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4618 CNTR_NORMAL,
4619 access_pio_vl_fifo_parity_err_cnt),
4620[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4621 CNTR_NORMAL,
4622 access_pio_vlf_sop_parity_err_cnt),
4623[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4624 CNTR_NORMAL,
4625 access_pio_vlf_v1_len_parity_err_cnt),
4626[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4627 CNTR_NORMAL,
4628 access_pio_block_qw_count_parity_err_cnt),
4629[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4630 CNTR_NORMAL,
4631 access_pio_write_qw_valid_parity_err_cnt),
4632[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4633 CNTR_NORMAL,
4634 access_pio_state_machine_err_cnt),
4635[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4636 CNTR_NORMAL,
4637 access_pio_write_data_parity_err_cnt),
4638[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4639 CNTR_NORMAL,
4640 access_pio_host_addr_mem_cor_err_cnt),
4641[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4642 CNTR_NORMAL,
4643 access_pio_host_addr_mem_unc_err_cnt),
4644[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4645 CNTR_NORMAL,
4646 access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4647[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4648 CNTR_NORMAL,
4649 access_pio_init_sm_in_err_cnt),
4650[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4651 CNTR_NORMAL,
4652 access_pio_ppmc_pbl_fifo_err_cnt),
4653[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4654 0, CNTR_NORMAL,
4655 access_pio_credit_ret_fifo_parity_err_cnt),
4656[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4657 CNTR_NORMAL,
4658 access_pio_v1_len_mem_bank1_cor_err_cnt),
4659[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4660 CNTR_NORMAL,
4661 access_pio_v1_len_mem_bank0_cor_err_cnt),
4662[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4663 CNTR_NORMAL,
4664 access_pio_v1_len_mem_bank1_unc_err_cnt),
4665[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4666 CNTR_NORMAL,
4667 access_pio_v1_len_mem_bank0_unc_err_cnt),
4668[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4669 CNTR_NORMAL,
4670 access_pio_sm_pkt_reset_parity_err_cnt),
4671[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4672 CNTR_NORMAL,
4673 access_pio_pkt_evict_fifo_parity_err_cnt),
4674[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4675 "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4676 CNTR_NORMAL,
4677 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4678[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4679 CNTR_NORMAL,
4680 access_pio_sbrdctl_crrel_parity_err_cnt),
4681[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4682 CNTR_NORMAL,
4683 access_pio_pec_fifo_parity_err_cnt),
4684[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4685 CNTR_NORMAL,
4686 access_pio_pcc_fifo_parity_err_cnt),
4687[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4688 CNTR_NORMAL,
4689 access_pio_sb_mem_fifo1_err_cnt),
4690[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4691 CNTR_NORMAL,
4692 access_pio_sb_mem_fifo0_err_cnt),
4693[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4694 CNTR_NORMAL,
4695 access_pio_csr_parity_err_cnt),
4696[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4697 CNTR_NORMAL,
4698 access_pio_write_addr_parity_err_cnt),
4699[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4700 CNTR_NORMAL,
4701 access_pio_write_bad_ctxt_err_cnt),
4702/* SendDmaErrStatus */
4703[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4704 0, CNTR_NORMAL,
4705 access_sdma_pcie_req_tracking_cor_err_cnt),
4706[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4707 0, CNTR_NORMAL,
4708 access_sdma_pcie_req_tracking_unc_err_cnt),
4709[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4710 CNTR_NORMAL,
4711 access_sdma_csr_parity_err_cnt),
4712[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4713 CNTR_NORMAL,
4714 access_sdma_rpy_tag_err_cnt),
4715/* SendEgressErrStatus */
4716[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4717 CNTR_NORMAL,
4718 access_tx_read_pio_memory_csr_unc_err_cnt),
4719[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4720 0, CNTR_NORMAL,
4721 access_tx_read_sdma_memory_csr_err_cnt),
4722[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4723 CNTR_NORMAL,
4724 access_tx_egress_fifo_cor_err_cnt),
4725[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4726 CNTR_NORMAL,
4727 access_tx_read_pio_memory_cor_err_cnt),
4728[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4729 CNTR_NORMAL,
4730 access_tx_read_sdma_memory_cor_err_cnt),
4731[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4732 CNTR_NORMAL,
4733 access_tx_sb_hdr_cor_err_cnt),
4734[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4735 CNTR_NORMAL,
4736 access_tx_credit_overrun_err_cnt),
4737[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4738 CNTR_NORMAL,
4739 access_tx_launch_fifo8_cor_err_cnt),
4740[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4741 CNTR_NORMAL,
4742 access_tx_launch_fifo7_cor_err_cnt),
4743[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4744 CNTR_NORMAL,
4745 access_tx_launch_fifo6_cor_err_cnt),
4746[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4747 CNTR_NORMAL,
4748 access_tx_launch_fifo5_cor_err_cnt),
4749[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4750 CNTR_NORMAL,
4751 access_tx_launch_fifo4_cor_err_cnt),
4752[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4753 CNTR_NORMAL,
4754 access_tx_launch_fifo3_cor_err_cnt),
4755[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4756 CNTR_NORMAL,
4757 access_tx_launch_fifo2_cor_err_cnt),
4758[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4759 CNTR_NORMAL,
4760 access_tx_launch_fifo1_cor_err_cnt),
4761[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4762 CNTR_NORMAL,
4763 access_tx_launch_fifo0_cor_err_cnt),
4764[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4765 CNTR_NORMAL,
4766 access_tx_credit_return_vl_err_cnt),
4767[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4768 CNTR_NORMAL,
4769 access_tx_hcrc_insertion_err_cnt),
4770[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4771 CNTR_NORMAL,
4772 access_tx_egress_fifo_unc_err_cnt),
4773[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4774 CNTR_NORMAL,
4775 access_tx_read_pio_memory_unc_err_cnt),
4776[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4777 CNTR_NORMAL,
4778 access_tx_read_sdma_memory_unc_err_cnt),
4779[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4780 CNTR_NORMAL,
4781 access_tx_sb_hdr_unc_err_cnt),
4782[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4783 CNTR_NORMAL,
4784 access_tx_credit_return_partiy_err_cnt),
4785[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4786 0, 0, CNTR_NORMAL,
4787 access_tx_launch_fifo8_unc_or_parity_err_cnt),
4788[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4789 0, 0, CNTR_NORMAL,
4790 access_tx_launch_fifo7_unc_or_parity_err_cnt),
4791[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4792 0, 0, CNTR_NORMAL,
4793 access_tx_launch_fifo6_unc_or_parity_err_cnt),
4794[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4795 0, 0, CNTR_NORMAL,
4796 access_tx_launch_fifo5_unc_or_parity_err_cnt),
4797[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4798 0, 0, CNTR_NORMAL,
4799 access_tx_launch_fifo4_unc_or_parity_err_cnt),
4800[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4801 0, 0, CNTR_NORMAL,
4802 access_tx_launch_fifo3_unc_or_parity_err_cnt),
4803[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4804 0, 0, CNTR_NORMAL,
4805 access_tx_launch_fifo2_unc_or_parity_err_cnt),
4806[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4807 0, 0, CNTR_NORMAL,
4808 access_tx_launch_fifo1_unc_or_parity_err_cnt),
4809[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4810 0, 0, CNTR_NORMAL,
4811 access_tx_launch_fifo0_unc_or_parity_err_cnt),
4812[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4813 0, 0, CNTR_NORMAL,
4814 access_tx_sdma15_disallowed_packet_err_cnt),
4815[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4816 0, 0, CNTR_NORMAL,
4817 access_tx_sdma14_disallowed_packet_err_cnt),
4818[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4819 0, 0, CNTR_NORMAL,
4820 access_tx_sdma13_disallowed_packet_err_cnt),
4821[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4822 0, 0, CNTR_NORMAL,
4823 access_tx_sdma12_disallowed_packet_err_cnt),
4824[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4825 0, 0, CNTR_NORMAL,
4826 access_tx_sdma11_disallowed_packet_err_cnt),
4827[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4828 0, 0, CNTR_NORMAL,
4829 access_tx_sdma10_disallowed_packet_err_cnt),
4830[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4831 0, 0, CNTR_NORMAL,
4832 access_tx_sdma9_disallowed_packet_err_cnt),
4833[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4834 0, 0, CNTR_NORMAL,
4835 access_tx_sdma8_disallowed_packet_err_cnt),
4836[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4837 0, 0, CNTR_NORMAL,
4838 access_tx_sdma7_disallowed_packet_err_cnt),
4839[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4840 0, 0, CNTR_NORMAL,
4841 access_tx_sdma6_disallowed_packet_err_cnt),
4842[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4843 0, 0, CNTR_NORMAL,
4844 access_tx_sdma5_disallowed_packet_err_cnt),
4845[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4846 0, 0, CNTR_NORMAL,
4847 access_tx_sdma4_disallowed_packet_err_cnt),
4848[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4849 0, 0, CNTR_NORMAL,
4850 access_tx_sdma3_disallowed_packet_err_cnt),
4851[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4852 0, 0, CNTR_NORMAL,
4853 access_tx_sdma2_disallowed_packet_err_cnt),
4854[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4855 0, 0, CNTR_NORMAL,
4856 access_tx_sdma1_disallowed_packet_err_cnt),
4857[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4858 0, 0, CNTR_NORMAL,
4859 access_tx_sdma0_disallowed_packet_err_cnt),
4860[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4861 CNTR_NORMAL,
4862 access_tx_config_parity_err_cnt),
4863[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4864 CNTR_NORMAL,
4865 access_tx_sbrd_ctl_csr_parity_err_cnt),
4866[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4867 CNTR_NORMAL,
4868 access_tx_launch_csr_parity_err_cnt),
4869[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4870 CNTR_NORMAL,
4871 access_tx_illegal_vl_err_cnt),
4872[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4873 "TxSbrdCtlStateMachineParityErr", 0, 0,
4874 CNTR_NORMAL,
4875 access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4876[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4877 CNTR_NORMAL,
4878 access_egress_reserved_10_err_cnt),
4879[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4880 CNTR_NORMAL,
4881 access_egress_reserved_9_err_cnt),
4882[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4883 0, 0, CNTR_NORMAL,
4884 access_tx_sdma_launch_intf_parity_err_cnt),
4885[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4886 CNTR_NORMAL,
4887 access_tx_pio_launch_intf_parity_err_cnt),
4888[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4889 CNTR_NORMAL,
4890 access_egress_reserved_6_err_cnt),
4891[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4892 CNTR_NORMAL,
4893 access_tx_incorrect_link_state_err_cnt),
4894[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4895 CNTR_NORMAL,
4896 access_tx_linkdown_err_cnt),
4897[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4898 "EgressFifoUnderrunOrParityErr", 0, 0,
4899 CNTR_NORMAL,
4900 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4901[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4902 CNTR_NORMAL,
4903 access_egress_reserved_2_err_cnt),
4904[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4905 CNTR_NORMAL,
4906 access_tx_pkt_integrity_mem_unc_err_cnt),
4907[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4908 CNTR_NORMAL,
4909 access_tx_pkt_integrity_mem_cor_err_cnt),
4910/* SendErrStatus */
4911[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4912 CNTR_NORMAL,
4913 access_send_csr_write_bad_addr_err_cnt),
4914[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4915 CNTR_NORMAL,
4916 access_send_csr_read_bad_addr_err_cnt),
4917[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4918 CNTR_NORMAL,
4919 access_send_csr_parity_cnt),
4920/* SendCtxtErrStatus */
4921[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4922 CNTR_NORMAL,
4923 access_pio_write_out_of_bounds_err_cnt),
4924[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4925 CNTR_NORMAL,
4926 access_pio_write_overflow_err_cnt),
4927[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4928 0, 0, CNTR_NORMAL,
4929 access_pio_write_crosses_boundary_err_cnt),
4930[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4931 CNTR_NORMAL,
4932 access_pio_disallowed_packet_err_cnt),
4933[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4934 CNTR_NORMAL,
4935 access_pio_inconsistent_sop_err_cnt),
4936/* SendDmaEngErrStatus */
4937[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4938 0, 0, CNTR_NORMAL,
4939 access_sdma_header_request_fifo_cor_err_cnt),
4940[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4941 CNTR_NORMAL,
4942 access_sdma_header_storage_cor_err_cnt),
4943[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4944 CNTR_NORMAL,
4945 access_sdma_packet_tracking_cor_err_cnt),
4946[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4947 CNTR_NORMAL,
4948 access_sdma_assembly_cor_err_cnt),
4949[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4950 CNTR_NORMAL,
4951 access_sdma_desc_table_cor_err_cnt),
4952[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4953 0, 0, CNTR_NORMAL,
4954 access_sdma_header_request_fifo_unc_err_cnt),
4955[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4956 CNTR_NORMAL,
4957 access_sdma_header_storage_unc_err_cnt),
4958[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_sdma_packet_tracking_unc_err_cnt),
4961[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_sdma_assembly_unc_err_cnt),
4964[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_sdma_desc_table_unc_err_cnt),
4967[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4968 CNTR_NORMAL,
4969 access_sdma_timeout_err_cnt),
4970[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4971 CNTR_NORMAL,
4972 access_sdma_header_length_err_cnt),
4973[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4974 CNTR_NORMAL,
4975 access_sdma_header_address_err_cnt),
4976[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4977 CNTR_NORMAL,
4978 access_sdma_header_select_err_cnt),
4979[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4980 CNTR_NORMAL,
4981 access_sdma_reserved_9_err_cnt),
4982[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4983 CNTR_NORMAL,
4984 access_sdma_packet_desc_overflow_err_cnt),
4985[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4986 CNTR_NORMAL,
4987 access_sdma_length_mismatch_err_cnt),
4988[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4989 CNTR_NORMAL,
4990 access_sdma_halt_err_cnt),
4991[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4992 CNTR_NORMAL,
4993 access_sdma_mem_read_err_cnt),
4994[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4995 CNTR_NORMAL,
4996 access_sdma_first_desc_err_cnt),
4997[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4998 CNTR_NORMAL,
4999 access_sdma_tail_out_of_bounds_err_cnt),
5000[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5001 CNTR_NORMAL,
5002 access_sdma_too_long_err_cnt),
5003[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5004 CNTR_NORMAL,
5005 access_sdma_gen_mismatch_err_cnt),
5006[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5007 CNTR_NORMAL,
5008 access_sdma_wrong_dw_err_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005009};
5010
5011static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5012[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5013 CNTR_NORMAL),
5014[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5015 CNTR_NORMAL),
5016[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5017 CNTR_NORMAL),
5018[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5019 CNTR_NORMAL),
5020[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5021 CNTR_NORMAL),
5022[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5023 CNTR_NORMAL),
5024[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5025 CNTR_NORMAL),
5026[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5027[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5028[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5029[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005030 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005031[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005032 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005033[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
Jubin John17fb4f22016-02-14 20:21:52 -08005034 CNTR_SYNTH | CNTR_VL),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005035[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5036[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5037[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005038 access_sw_link_dn_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005039[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005040 access_sw_link_up_cnt),
Dean Luick6d014532015-12-01 15:38:23 -05005041[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5042 access_sw_unknown_frame_cnt),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005043[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
Jubin John17fb4f22016-02-14 20:21:52 -08005044 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005045[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08005046 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5047 access_sw_xmit_discards),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005048[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005049 access_xmit_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005050[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
Jubin John17fb4f22016-02-14 20:21:52 -08005051 access_rcv_constraint_errs),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005052[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5053[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5054[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5055[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5056[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5057[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5058[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5059[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5060[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5061[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5062[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5063[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5064[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5065 access_sw_cpu_rc_acks),
5066[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005067 access_sw_cpu_rc_qacks),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005068[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
Jubin John17fb4f22016-02-14 20:21:52 -08005069 access_sw_cpu_rc_delayed_comp),
Mike Marciniszyn77241052015-07-30 15:17:43 -04005070[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5071[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5072[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5073[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5074[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5075[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5076[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5077[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5078[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5079[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5080[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5081[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5082[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5083[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5084[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5085[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5086[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5087[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5088[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5089[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5090[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5091[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5092[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5093[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5094[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5095[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5096[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5097[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5098[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5099[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5100[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5101[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5102[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5103[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5104[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5105[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5106[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5107[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5108[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5109[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5110[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5111[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5112[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5113[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5114[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5115[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5116[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5117[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5118[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5119[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5120[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5121[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5122[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5123[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5124[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5125[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5126[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5127[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5128[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5129[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5130[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5131[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5132[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5133[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5134[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5135[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5136[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5137[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5138[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5139[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5140[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5141[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5142[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5143[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5144[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5145[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5146[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5147[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5148[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5149[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5150};
5151
5152/* ======================================================================== */
5153
Mike Marciniszyn77241052015-07-30 15:17:43 -04005154/* return true if this is chip revision a */
5155int is_ax(struct hfi1_devdata *dd)
5156{
5157 u8 chip_rev_minor =
5158 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5159 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5160 return (chip_rev_minor & 0xf0) == 0;
5161}
5162
5163/* return true if this is chip revision b */
5164int is_bx(struct hfi1_devdata *dd)
5165{
5166 u8 chip_rev_minor =
5167 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5168 & CCE_REVISION_CHIP_REV_MINOR_MASK;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005169 return (chip_rev_minor & 0xF0) == 0x10;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005170}
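/*
 * In both revision checks above, the upper nibble of the minor chip
 * revision encodes the silicon step: 0x0 indicates the A-step and 0x1
 * the B-step.
 */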
5171
5172/*
5173 * Append string s to buffer buf. Arguments curp and lenp point to the
5174 * current position and remaining length, respectively.
5175 *
5176 * return 0 on success, 1 on out of room
5177 */
5178static int append_str(char *buf, char **curp, int *lenp, const char *s)
5179{
5180 char *p = *curp;
5181 int len = *lenp;
5182 int result = 0; /* success */
5183 char c;
5184
5185	/* add a comma, if not the first string in the buffer */
5186 if (p != buf) {
5187 if (len == 0) {
5188 result = 1; /* out of room */
5189 goto done;
5190 }
5191 *p++ = ',';
5192 len--;
5193 }
5194
5195 /* copy the string */
5196 while ((c = *s++) != 0) {
5197 if (len == 0) {
5198 result = 1; /* out of room */
5199 goto done;
5200 }
5201 *p++ = c;
5202 len--;
5203 }
5204
5205done:
5206 /* write return values */
5207 *curp = p;
5208 *lenp = len;
5209
5210 return result;
5211}
5212
5213/*
5214 * Using the given flag table, print a comma separated string into
5215 * the buffer. End in '*' if the buffer is too short.
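 * For example (hypothetical table contents): with entries { 0x1, "ErrA" }
 * and { 0x4, "ErrB" }, flags == 0x7 formats as "ErrA,ErrB,bits 0x2".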
5216 */
5217static char *flag_string(char *buf, int buf_len, u64 flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005218 struct flag_table *table, int table_size)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005219{
5220 char extra[32];
5221 char *p = buf;
5222 int len = buf_len;
5223 int no_room = 0;
5224 int i;
5225
5226	/* make sure there are at least 2 bytes so we can form "*" */
5227 if (len < 2)
5228 return "";
5229
5230 len--; /* leave room for a nul */
5231 for (i = 0; i < table_size; i++) {
5232 if (flags & table[i].flag) {
5233 no_room = append_str(buf, &p, &len, table[i].str);
5234 if (no_room)
5235 break;
5236 flags &= ~table[i].flag;
5237 }
5238 }
5239
5240 /* any undocumented bits left? */
5241 if (!no_room && flags) {
5242 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5243 no_room = append_str(buf, &p, &len, extra);
5244 }
5245
5246 /* add * if ran out of room */
5247 if (no_room) {
5248 /* may need to back up to add space for a '*' */
5249 if (len == 0)
5250 --p;
5251 *p++ = '*';
5252 }
5253
5254 /* add final nul - space already allocated above */
5255 *p = 0;
5256 return buf;
5257}
5258
5259/* first 8 CCE error interrupt source names */
5260static const char * const cce_misc_names[] = {
5261 "CceErrInt", /* 0 */
5262 "RxeErrInt", /* 1 */
5263 "MiscErrInt", /* 2 */
5264 "Reserved3", /* 3 */
5265 "PioErrInt", /* 4 */
5266 "SDmaErrInt", /* 5 */
5267 "EgressErrInt", /* 6 */
5268 "TxeErrInt" /* 7 */
5269};
5270
5271/*
5272 * Return the miscellaneous error interrupt name.
5273 */
5274static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5275{
5276 if (source < ARRAY_SIZE(cce_misc_names))
5277 strncpy(buf, cce_misc_names[source], bsize);
5278 else
Jubin John17fb4f22016-02-14 20:21:52 -08005279 snprintf(buf, bsize, "Reserved%u",
5280 source + IS_GENERAL_ERR_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005281
5282 return buf;
5283}
5284
5285/*
5286 * Return the SDMA engine error interrupt name.
5287 */
5288static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5289{
5290 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5291 return buf;
5292}
5293
5294/*
5295 * Return the send context error interrupt name.
5296 */
5297static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5298{
5299 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5300 return buf;
5301}
5302
5303static const char * const various_names[] = {
5304 "PbcInt",
5305 "GpioAssertInt",
5306 "Qsfp1Int",
5307 "Qsfp2Int",
5308 "TCritInt"
5309};
5310
5311/*
5312 * Return the various interrupt name.
5313 */
5314static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5315{
5316 if (source < ARRAY_SIZE(various_names))
5317 strncpy(buf, various_names[source], bsize);
5318 else
Jubin John8638b772016-02-14 20:19:24 -08005319 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005320 return buf;
5321}
5322
5323/*
5324 * Return the DC interrupt name.
5325 */
5326static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5327{
5328 static const char * const dc_int_names[] = {
5329 "common",
5330 "lcb",
5331 "8051",
5332 "lbm" /* local block merge */
5333 };
5334
5335 if (source < ARRAY_SIZE(dc_int_names))
5336 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5337 else
5338 snprintf(buf, bsize, "DCInt%u", source);
5339 return buf;
5340}
5341
5342static const char * const sdma_int_names[] = {
5343 "SDmaInt",
5344 "SdmaIdleInt",
5345 "SdmaProgressInt",
5346};
5347
5348/*
5349 * Return the SDMA engine interrupt name.
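 * For example, if TXE_NUM_SDMA_ENGINES is 16, source 17 decodes to
 * "SdmaIdleInt1" (interrupt type 1 on engine 1).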
5350 */
5351static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5352{
5353 /* what interrupt */
5354 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5355 /* which engine */
5356 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5357
5358 if (likely(what < 3))
5359 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5360 else
5361 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5362 return buf;
5363}
5364
5365/*
5366 * Return the receive available interrupt name.
5367 */
5368static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5369{
5370 snprintf(buf, bsize, "RcvAvailInt%u", source);
5371 return buf;
5372}
5373
5374/*
5375 * Return the receive urgent interrupt name.
5376 */
5377static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5378{
5379 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5380 return buf;
5381}
5382
5383/*
5384 * Return the send credit interrupt name.
5385 */
5386static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5387{
5388 snprintf(buf, bsize, "SendCreditInt%u", source);
5389 return buf;
5390}
5391
5392/*
5393 * Return the reserved interrupt name.
5394 */
5395static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5396{
5397 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5398 return buf;
5399}
5400
5401static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5402{
5403 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005404 cce_err_status_flags,
5405 ARRAY_SIZE(cce_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005406}
5407
5408static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5409{
5410 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005411 rxe_err_status_flags,
5412 ARRAY_SIZE(rxe_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005413}
5414
5415static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5416{
5417 return flag_string(buf, buf_len, flags, misc_err_status_flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005418 ARRAY_SIZE(misc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005419}
5420
5421static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5422{
5423 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005424 pio_err_status_flags,
5425 ARRAY_SIZE(pio_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005426}
5427
5428static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5429{
5430 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005431 sdma_err_status_flags,
5432 ARRAY_SIZE(sdma_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005433}
5434
5435static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5436{
5437 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005438 egress_err_status_flags,
5439 ARRAY_SIZE(egress_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005440}
5441
5442static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5443{
5444 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005445 egress_err_info_flags,
5446 ARRAY_SIZE(egress_err_info_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005447}
5448
5449static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5450{
5451 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005452 send_err_status_flags,
5453 ARRAY_SIZE(send_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005454}
5455
5456static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5457{
5458 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005459 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005460
5461 /*
5462 * For most of these errors, there is nothing that can be done except
5463 * report or record it.
5464 */
5465 dd_dev_info(dd, "CCE Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005466 cce_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005467
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005468 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5469 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005470 /* this error requires a manual drop into SPC freeze mode */
5471 /* then a fix up */
5472 start_freeze_handling(dd->pport, FREEZE_SELF);
5473 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005474
5475 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5476 if (reg & (1ull << i)) {
5477 incr_cntr64(&dd->cce_err_status_cnt[i]);
5478 /* maintain a counter over all cce_err_status errors */
5479 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5480 }
5481 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005482}
5483
5484/*
5485 * Check counters for receive errors that do not have an interrupt
5486 * associated with them.
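 * The check is driven by a timer that fires every RCVERR_CHECK_TIME
 * seconds.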
5487 */
5488#define RCVERR_CHECK_TIME 10
5489static void update_rcverr_timer(unsigned long opaque)
5490{
5491 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5492 struct hfi1_pportdata *ppd = dd->pport;
5493 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5494
5495 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
Jubin John17fb4f22016-02-14 20:21:52 -08005496 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04005497 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
Jubin John17fb4f22016-02-14 20:21:52 -08005498 set_link_down_reason(
5499 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5500 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005501 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5502 }
Jubin John50e5dcb2016-02-14 20:19:41 -08005503 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005504
5505 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5506}
5507
5508static int init_rcverr(struct hfi1_devdata *dd)
5509{
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +05305510 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005511 /* Assume the hardware counter has been reset */
5512 dd->rcv_ovfl_cnt = 0;
5513 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5514}
5515
5516static void free_rcverr(struct hfi1_devdata *dd)
5517{
5518 if (dd->rcverr_timer.data)
5519 del_timer_sync(&dd->rcverr_timer);
5520 dd->rcverr_timer.data = 0;
5521}
5522
5523static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5524{
5525 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005526 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005527
5528 dd_dev_info(dd, "Receive Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005529 rxe_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005530
5531 if (reg & ALL_RXE_FREEZE_ERR) {
5532 int flags = 0;
5533
5534 /*
5535 * Freeze mode recovery is disabled for the errors
5536 * in RXE_FREEZE_ABORT_MASK
5537 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05005538 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005539 flags = FREEZE_ABORT;
5540
5541 start_freeze_handling(dd->pport, flags);
5542 }
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005543
5544 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5545 if (reg & (1ull << i))
5546 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5547 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005548}
5549
5550static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5551{
5552 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005553 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005554
5555 dd_dev_info(dd, "Misc Error: %s",
Jubin John17fb4f22016-02-14 20:21:52 -08005556 misc_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005557 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5558 if (reg & (1ull << i))
5559 incr_cntr64(&dd->misc_err_status_cnt[i]);
5560 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005561}
5562
5563static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5564{
5565 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005566 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005567
5568 dd_dev_info(dd, "PIO Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005569 pio_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005570
5571 if (reg & ALL_PIO_FREEZE_ERR)
5572 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005573
5574 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5575 if (reg & (1ull << i))
5576 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5577 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005578}
5579
5580static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5581{
5582 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005583 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005584
5585 dd_dev_info(dd, "SDMA Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005586 sdma_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005587
5588 if (reg & ALL_SDMA_FREEZE_ERR)
5589 start_freeze_handling(dd->pport, 0);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005590
5591 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5592 if (reg & (1ull << i))
5593 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5594 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005595}
5596
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005597static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5598{
5599 incr_cntr64(&ppd->port_xmit_discards);
5600}
5601
Mike Marciniszyn77241052015-07-30 15:17:43 -04005602static void count_port_inactive(struct hfi1_devdata *dd)
5603{
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005604 __count_port_discards(dd->pport);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005605}
5606
5607/*
5608 * We have had a "disallowed packet" error during egress. Determine the
5609 * integrity check which failed, and update the relevant error counters, etc.
5610 *
5611 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5612 * bit of state per integrity check, and so we can miss the reason for an
5613 * egress error if more than one packet fails the same integrity check
5614 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5615 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005616static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5617 int vl)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005618{
5619 struct hfi1_pportdata *ppd = dd->pport;
5620 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5621 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5622 char buf[96];
5623
5624 /* clear down all observed info as quickly as possible after read */
5625 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5626
5627 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005628 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5629 info, egress_err_info_string(buf, sizeof(buf), info), src);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005630
5631 /* Eventually add other counters for each bit */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005632 if (info & PORT_DISCARD_EGRESS_ERRS) {
5633 int weight, i;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005634
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005635 /*
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005636 * Count all applicable bits as individual errors and
5637 * attribute them to the packet that triggered this handler.
5638 * This may not be completely accurate due to limitations
5639 * on the available hardware error information. There is
5640 * a single information register and any number of error
5641 * packets may have occurred and contributed to it before
5642 * this routine is called. This means that:
5643 * a) If multiple packets with the same error occur before
5644 * this routine is called, earlier packets are missed.
5645 * There is only a single bit for each error type.
5646 * b) Errors may not be attributed to the correct VL.
5647 * The driver is attributing all bits in the info register
5648 * to the packet that triggered this call, but bits
5649 * could be an accumulation of different packets with
5650 * different VLs.
5651 * c) A single error packet may have multiple counts attached
5652 * to it. There is no way for the driver to know if
5653 * multiple bits set in the info register are due to a
5654 * single packet or multiple packets. The driver assumes
5655 * multiple packets.
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005656 */
Dean Luick4c9e7aa2016-02-18 11:12:08 -08005657 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005658 for (i = 0; i < weight; i++) {
5659 __count_port_discards(ppd);
5660 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5661 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5662 else if (vl == 15)
5663 incr_cntr64(&ppd->port_xmit_discards_vl
5664 [C_VL_15]);
5665 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005666 }
5667}
5668
5669/*
5670 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5671 * register. Does it represent a 'port inactive' error?
5672 */
5673static inline int port_inactive_err(u64 posn)
5674{
5675 return (posn >= SEES(TX_LINKDOWN) &&
5676 posn <= SEES(TX_INCORRECT_LINK_STATE));
5677}
5678
5679/*
5680 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5681 * register. Does it represent a 'disallowed packet' error?
5682 */
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005683static inline int disallowed_pkt_err(int posn)
Mike Marciniszyn77241052015-07-30 15:17:43 -04005684{
5685 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5686 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5687}
5688
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005689/*
5690 * Input value is a bit position of one of the SDMA engine disallowed
5691 * packet errors. Return which engine. Use of this must be guarded by
5692 * disallowed_pkt_err().
5693 */
5694static inline int disallowed_pkt_engine(int posn)
5695{
5696 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5697}
5698
5699/*
5700 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5701 * be done.
5702 */
5703static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5704{
5705 struct sdma_vl_map *m;
5706 int vl;
5707
5708 /* range check */
5709 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5710 return -1;
5711
5712 rcu_read_lock();
5713 m = rcu_dereference(dd->sdma_map);
5714 vl = m->engine_to_vl[engine];
5715 rcu_read_unlock();
5716
5717 return vl;
5718}
5719
5720/*
5721 * Translate the send context (software index) into a VL. Return -1 if the
5722 * translation cannot be done.
5723 */
5724static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5725{
5726 struct send_context_info *sci;
5727 struct send_context *sc;
5728 int i;
5729
5730 sci = &dd->send_contexts[sw_index];
5731
5732 /* there is no information for user (PSM) and ack contexts */
Jianxin Xiong44306f12016-04-12 11:30:28 -07005733 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005734 return -1;
5735
5736 sc = sci->sc;
5737 if (!sc)
5738 return -1;
5739 if (dd->vld[15].sc == sc)
5740 return 15;
5741 for (i = 0; i < num_vls; i++)
5742 if (dd->vld[i].sc == sc)
5743 return i;
5744
5745 return -1;
5746}
5747
Mike Marciniszyn77241052015-07-30 15:17:43 -04005748static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5749{
5750 u64 reg_copy = reg, handled = 0;
5751 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005752 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005753
5754 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5755 start_freeze_handling(dd->pport, 0);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005756 else if (is_ax(dd) &&
5757 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5758 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
Mike Marciniszyn77241052015-07-30 15:17:43 -04005759 start_freeze_handling(dd->pport, 0);
5760
5761 while (reg_copy) {
5762 int posn = fls64(reg_copy);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005763 /* fls64() returns a 1-based offset, we want it zero based */
Mike Marciniszyn77241052015-07-30 15:17:43 -04005764 int shift = posn - 1;
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005765 u64 mask = 1ULL << shift;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005766
5767 if (port_inactive_err(shift)) {
5768 count_port_inactive(dd);
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005769 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005770 } else if (disallowed_pkt_err(shift)) {
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005771 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5772
5773 handle_send_egress_err_info(dd, vl);
5774 handled |= mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005775 }
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005776 reg_copy &= ~mask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005777 }
5778
5779 reg &= ~handled;
5780
5781 if (reg)
5782 dd_dev_info(dd, "Egress Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005783 egress_err_status_string(buf, sizeof(buf), reg));
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005784
5785 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5786 if (reg & (1ull << i))
5787 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5788 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005789}
5790
5791static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5792{
5793 char buf[96];
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005794 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005795
5796 dd_dev_info(dd, "Send Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005797 send_err_status_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005798
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005799 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5800 if (reg & (1ull << i))
5801 incr_cntr64(&dd->send_err_status_cnt[i]);
5802 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005803}
5804
5805/*
5806 * The maximum number of times the error clear down will loop before
5807 * blocking a repeating error. This value is arbitrary.
5808 */
5809#define MAX_CLEAR_COUNT 20
5810
5811/*
5812 * Clear and handle an error register. All error interrupts are funneled
5813 * through here to have a central location to correctly handle single-
5814 * or multi-shot errors.
5815 *
5816 * For non per-context registers, call this routine with a context value
5817 * of 0 so the per-context offset is zero.
5818 *
5819 * If the handler loops too many times, assume that something is wrong
5820 * and can't be fixed, so mask the error bits.
5821 */
5822static void interrupt_clear_down(struct hfi1_devdata *dd,
5823 u32 context,
5824 const struct err_reg_info *eri)
5825{
5826 u64 reg;
5827 u32 count;
5828
5829 /* read in a loop until no more errors are seen */
5830 count = 0;
5831 while (1) {
5832 reg = read_kctxt_csr(dd, context, eri->status);
5833 if (reg == 0)
5834 break;
5835 write_kctxt_csr(dd, context, eri->clear, reg);
5836 if (likely(eri->handler))
5837 eri->handler(dd, context, reg);
5838 count++;
5839 if (count > MAX_CLEAR_COUNT) {
5840 u64 mask;
5841
5842 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005843 eri->desc, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005844 /*
5845 * Read-modify-write so any other masked bits
5846 * remain masked.
5847 */
5848 mask = read_kctxt_csr(dd, context, eri->mask);
5849 mask &= ~reg;
5850 write_kctxt_csr(dd, context, eri->mask, mask);
5851 break;
5852 }
5853 }
5854}
5855
5856/*
5857 * CCE block "misc" interrupt. Source is < 16.
5858 */
5859static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5860{
5861 const struct err_reg_info *eri = &misc_errs[source];
5862
5863 if (eri->handler) {
5864 interrupt_clear_down(dd, 0, eri);
5865 } else {
5866 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
Jubin John17fb4f22016-02-14 20:21:52 -08005867 source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005868 }
5869}
5870
5871static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5872{
5873 return flag_string(buf, buf_len, flags,
Jubin John17fb4f22016-02-14 20:21:52 -08005874 sc_err_status_flags,
5875 ARRAY_SIZE(sc_err_status_flags));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005876}
5877
5878/*
5879 * Send context error interrupt. Source (hw_context) is < 160.
5880 *
5881 * All send context errors cause the send context to halt. The normal
5882 * clear-down mechanism cannot be used because we cannot clear the
5883 * error bits until several other long-running items are done first.
5884 * This is OK because with the context halted, nothing else is going
5885 * to happen on it anyway.
5886 */
5887static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5888 unsigned int hw_context)
5889{
5890 struct send_context_info *sci;
5891 struct send_context *sc;
5892 char flags[96];
5893 u64 status;
5894 u32 sw_index;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005895 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005896
5897 sw_index = dd->hw_to_sw[hw_context];
5898 if (sw_index >= dd->num_send_contexts) {
5899 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08005900 "out of range sw index %u for send context %u\n",
5901 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005902 return;
5903 }
5904 sci = &dd->send_contexts[sw_index];
5905 sc = sci->sc;
5906 if (!sc) {
5907 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -08005908 sw_index, hw_context);
Mike Marciniszyn77241052015-07-30 15:17:43 -04005909 return;
5910 }
5911
5912 /* tell the software that a halt has begun */
5913 sc_stop(sc, SCF_HALTED);
5914
5915 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5916
5917 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
Jubin John17fb4f22016-02-14 20:21:52 -08005918 send_context_err_status_string(flags, sizeof(flags),
5919 status));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005920
5921 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
Mike Marciniszyn69a00b82016-02-03 14:31:49 -08005922 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
Mike Marciniszyn77241052015-07-30 15:17:43 -04005923
5924 /*
5925 * Automatically restart halted kernel contexts out of interrupt
5926 * context. User contexts must ask the driver to restart the context.
5927 */
5928 if (sc->type != SC_USER)
5929 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005930
5931 /*
5932 * Update the counters for the corresponding status bits.
5933 * Note that these particular counters are aggregated over all
5934 * 160 contexts.
5935 */
5936 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5937 if (status & (1ull << i))
5938 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5939 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005940}
5941
5942static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5943 unsigned int source, u64 status)
5944{
5945 struct sdma_engine *sde;
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005946 int i = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005947
5948 sde = &dd->per_sdma[source];
5949#ifdef CONFIG_SDMA_VERBOSITY
5950 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5951 slashstrip(__FILE__), __LINE__, __func__);
5952 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5953 sde->this_idx, source, (unsigned long long)status);
5954#endif
Vennila Megavannana699c6c2016-01-11 18:30:56 -05005955 sde->err_cnt++;
Mike Marciniszyn77241052015-07-30 15:17:43 -04005956 sdma_engine_error(sde, status);
Joel Rosenzweig2c5b5212015-12-01 15:38:19 -05005957
5958 /*
5959 * Update the counters for the corresponding status bits.
5960 * Note that these particular counters are aggregated over
5961 * all 16 DMA engines.
5962 */
5963 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5964 if (status & (1ull << i))
5965 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5966 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04005967}
5968
5969/*
5970 * CCE block SDMA error interrupt. Source is < 16.
5971 */
5972static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5973{
5974#ifdef CONFIG_SDMA_VERBOSITY
5975 struct sdma_engine *sde = &dd->per_sdma[source];
5976
5977 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5978 slashstrip(__FILE__), __LINE__, __func__);
5979 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5980 source);
5981 sdma_dumpstate(sde);
5982#endif
5983 interrupt_clear_down(dd, source, &sdma_eng_err);
5984}
5985
5986/*
5987 * CCE block "various" interrupt. Source is < 8.
5988 */
5989static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5990{
5991 const struct err_reg_info *eri = &various_err[source];
5992
5993 /*
5994 * TCritInt cannot go through interrupt_clear_down()
5995 * because it is not a second tier interrupt. The handler
5996 * should be called directly.
5997 */
5998 if (source == TCRIT_INT_SOURCE)
5999 handle_temp_err(dd);
6000 else if (eri->handler)
6001 interrupt_clear_down(dd, 0, eri);
6002 else
6003 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006004 "%s: Unimplemented/reserved interrupt %d\n",
6005 __func__, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006006}
6007
6008static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6009{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006010 /* src_ctx is always zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006011 struct hfi1_pportdata *ppd = dd->pport;
6012 unsigned long flags;
6013 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6014
6015 if (reg & QSFP_HFI0_MODPRST_N) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006016 if (!qsfp_mod_present(ppd)) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006017 dd_dev_info(dd, "%s: QSFP module removed\n",
6018 __func__);
6019
Mike Marciniszyn77241052015-07-30 15:17:43 -04006020 ppd->driver_link_ready = 0;
6021 /*
6022 * Cable removed, reset all our information about the
6023 * cache and cable capabilities
6024 */
6025
6026 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6027 /*
6028 * We don't set cache_refresh_required here as we expect
6029 * an interrupt when a cable is inserted
6030 */
6031 ppd->qsfp_info.cache_valid = 0;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006032 ppd->qsfp_info.reset_needed = 0;
6033 ppd->qsfp_info.limiting_active = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006034 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006035 flags);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006036 /* Invert the ModPresent pin now to detect plug-in */
6037 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6038 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006039
6040 if ((ppd->offline_disabled_reason >
6041 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006042 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
Bryan Morgana9c05e32016-02-03 14:30:49 -08006043 (ppd->offline_disabled_reason ==
6044 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6045 ppd->offline_disabled_reason =
6046 HFI1_ODR_MASK(
Easwar Hariharane1bf0d52016-02-03 14:36:58 -08006047 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
Bryan Morgana9c05e32016-02-03 14:30:49 -08006048
Mike Marciniszyn77241052015-07-30 15:17:43 -04006049 if (ppd->host_link_state == HLS_DN_POLL) {
6050 /*
6051 * The link is still in POLL. This means
6052 * that the normal link down processing
6053 * will not happen. We have to do it here
6054 * before turning the DC off.
6055 */
6056 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6057 }
6058 } else {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006059 dd_dev_info(dd, "%s: QSFP module inserted\n",
6060 __func__);
6061
Mike Marciniszyn77241052015-07-30 15:17:43 -04006062 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6063 ppd->qsfp_info.cache_valid = 0;
6064 ppd->qsfp_info.cache_refresh_required = 1;
6065 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08006066 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006067
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006068 /*
6069 * Stop inversion of ModPresent pin to detect
6070 * removal of the cable
6071 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006072 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006073 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6074 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6075
6076 ppd->offline_disabled_reason =
6077 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006078 }
6079 }
6080
6081 if (reg & QSFP_HFI0_INT_N) {
Easwar Hariharane8aa2842016-02-18 11:12:16 -08006082 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006083 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006084 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6085 ppd->qsfp_info.check_interrupt_flags = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006086 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6087 }
6088
6089 /* Schedule the QSFP work only if there is a cable attached. */
6090 if (qsfp_mod_present(ppd))
6091 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6092}
6093
6094static int request_host_lcb_access(struct hfi1_devdata *dd)
6095{
6096 int ret;
6097
6098 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006099 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6100 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006101 if (ret != HCMD_SUCCESS) {
6102 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006103 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006104 }
6105 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6106}
6107
6108static int request_8051_lcb_access(struct hfi1_devdata *dd)
6109{
6110 int ret;
6111
6112 ret = do_8051_command(dd, HCMD_MISC,
Jubin John17fb4f22016-02-14 20:21:52 -08006113 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6114 LOAD_DATA_FIELD_ID_SHIFT, NULL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006115 if (ret != HCMD_SUCCESS) {
6116 dd_dev_err(dd, "%s: command failed with error %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006117 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006118 }
6119 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6120}
6121
6122/*
6123 * Set the LCB selector - allow host access. The DCC selector always
6124 * points to the host.
6125 */
6126static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6127{
6128 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006129 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6130 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006131}
6132
6133/*
6134 * Clear the LCB selector - allow 8051 access. The DCC selector always
6135 * points to the host.
6136 */
6137static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6138{
6139 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
Jubin John17fb4f22016-02-14 20:21:52 -08006140 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006141}
6142
6143/*
6144 * Acquire LCB access from the 8051. If the host already has access,
6145 * just increment a counter. Otherwise, inform the 8051 that the
6146 * host is taking access.
6147 *
6148 * Returns:
6149 * 0 on success
6150 * -EBUSY if the 8051 has control and cannot be disturbed
6151 * -errno if unable to acquire access from the 8051
6152 */
6153int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6154{
6155 struct hfi1_pportdata *ppd = dd->pport;
6156 int ret = 0;
6157
6158 /*
6159 * Use the host link state lock so the operation of this routine
6160 * { link state check, selector change, count increment } can occur
6161 * as a unit against a link state change. Otherwise there is a
6162 * race between the state change and the count increment.
6163 */
6164 if (sleep_ok) {
6165 mutex_lock(&ppd->hls_lock);
6166 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006167 while (!mutex_trylock(&ppd->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006168 udelay(1);
6169 }
6170
6171 /* this access is valid only when the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07006172 if (ppd->host_link_state & HLS_DOWN) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006173 dd_dev_info(dd, "%s: link state %s not up\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006174 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04006175 ret = -EBUSY;
6176 goto done;
6177 }
6178
6179 if (dd->lcb_access_count == 0) {
6180 ret = request_host_lcb_access(dd);
6181 if (ret) {
6182 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006183 "%s: unable to acquire LCB access, err %d\n",
6184 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006185 goto done;
6186 }
6187 set_host_lcb_access(dd);
6188 }
6189 dd->lcb_access_count++;
6190done:
6191 mutex_unlock(&ppd->hls_lock);
6192 return ret;
6193}
6194
6195/*
6196 * Release LCB access by decrementing the use count. If the count is moving
6197 * from 1 to 0, inform 8051 that it has control back.
6198 *
6199 * Returns:
6200 * 0 on success
6201 * -errno if unable to release access to the 8051
6202 */
6203int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6204{
6205 int ret = 0;
6206
6207 /*
6208 * Use the host link state lock because the acquire needed it.
6209 * Here, we only need to keep { selector change, count decrement }
6210 * as a unit.
6211 */
6212 if (sleep_ok) {
6213 mutex_lock(&dd->pport->hls_lock);
6214 } else {
Dan Carpenter951842b2015-09-16 09:22:51 +03006215 while (!mutex_trylock(&dd->pport->hls_lock))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006216 udelay(1);
6217 }
6218
6219 if (dd->lcb_access_count == 0) {
6220 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006221 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006222 goto done;
6223 }
6224
6225 if (dd->lcb_access_count == 1) {
6226 set_8051_lcb_access(dd);
6227 ret = request_8051_lcb_access(dd);
6228 if (ret) {
6229 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006230 "%s: unable to release LCB access, err %d\n",
6231 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006232 /* restore host access if the grant didn't work */
6233 set_host_lcb_access(dd);
6234 goto done;
6235 }
6236 }
6237 dd->lcb_access_count--;
6238done:
6239 mutex_unlock(&dd->pport->hls_lock);
6240 return ret;
6241}
6242
6243/*
6244 * Initialize LCB access variables and state. Called during driver load,
6245 * after most of the initialization is finished.
6246 *
6247 * The DC default is LCB access on for the host. The driver defaults to
6248 * leaving access to the 8051. Assign access now - this constrains the call
6249 * to this routine to be after all LCB set-up is done. In particular, after
 6250 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6251 */
6252static void init_lcb_access(struct hfi1_devdata *dd)
6253{
6254 dd->lcb_access_count = 0;
6255}
6256
6257/*
6258 * Write a response back to a 8051 request.
6259 */
6260static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6261{
6262 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
Jubin John17fb4f22016-02-14 20:21:52 -08006263 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6264 (u64)return_code <<
6265 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6266 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006267}
6268
6269/*
Easwar Hariharancbac3862016-02-03 14:31:31 -08006270 * Handle host requests from the 8051.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006271 */
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006272static void handle_8051_request(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006273{
Easwar Hariharancbac3862016-02-03 14:31:31 -08006274 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006275 u64 reg;
Easwar Hariharancbac3862016-02-03 14:31:31 -08006276 u16 data = 0;
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006277 u8 type;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006278
6279 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6280 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6281 return; /* no request */
6282
6283 /* zero out COMPLETED so the response is seen */
6284 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6285
6286 /* extract request details */
6287 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6288 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6289 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6290 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6291
6292 switch (type) {
6293 case HREQ_LOAD_CONFIG:
6294 case HREQ_SAVE_CONFIG:
6295 case HREQ_READ_CONFIG:
6296 case HREQ_SET_TX_EQ_ABS:
6297 case HREQ_SET_TX_EQ_REL:
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07006298 case HREQ_ENABLE:
Mike Marciniszyn77241052015-07-30 15:17:43 -04006299 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006300 type);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006301 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6302 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006303 case HREQ_CONFIG_DONE:
6304 hreq_response(dd, HREQ_SUCCESS, 0);
6305 break;
6306
6307 case HREQ_INTERFACE_TEST:
6308 hreq_response(dd, HREQ_SUCCESS, data);
6309 break;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006310 default:
6311 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6312 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6313 break;
6314 }
6315}
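/*
 * Worked example (values are illustrative): if the 8051 posts a request
 * with type == HREQ_INTERFACE_TEST and data == 0x1234, the switch above
 * echoes the data back with hreq_response(dd, HREQ_SUCCESS, 0x1234).
 * The unsupported config and TX equalization requests are answered with
 * hreq_response(dd, HREQ_NOT_SUPPORTED, 0) instead.
 */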
6316
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006317/*
 6318 * Set up allocation unit value.
6319 */
6320void set_up_vau(struct hfi1_devdata *dd, u8 vau)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006321{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006322 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6323
6324 /* do not modify other values in the register */
6325 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6326 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6327 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006328}
6329
6330/*
6331 * Set up initial VL15 credits of the remote. Assumes the rest of
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006332 * the CM credit registers are zero from a previous global or credit reset.
6333 * Shared limit for VL15 will always be 0.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006334 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006335void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006336{
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006337 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6338
6339 /* set initial values for total and shared credit limit */
6340 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6341 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6342
6343 /*
6344 * Set total limit to be equal to VL15 credits.
6345 * Leave shared limit at 0.
6346 */
6347 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6348 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006349
Dennis Dalessandroeacc8302016-10-17 04:19:52 -07006350 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6351 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006352}
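/*
 * Worked example (vl15buf value is illustrative): with vl15buf == 64,
 * the writes above leave SEND_CM_GLOBAL_CREDIT with a total credit
 * limit of 64 and a shared limit of 0, and give VL15 a dedicated limit
 * of 64 through SEND_CM_CREDIT_VL15.
 */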
6353
6354/*
6355 * Zero all credit details from the previous connection and
6356 * reset the CM manager's internal counters.
6357 */
6358void reset_link_credits(struct hfi1_devdata *dd)
6359{
6360 int i;
6361
6362 /* remove all previous VL credit limits */
6363 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -08006364 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006365 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006366 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006367 /* reset the CM block */
6368 pio_send_control(dd, PSC_CM_RESET);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006369 /* reset cached value */
6370 dd->vl15buf_cached = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006371}
6372
6373/* convert a vCU to a CU */
6374static u32 vcu_to_cu(u8 vcu)
6375{
6376 return 1 << vcu;
6377}
6378
6379/* convert a CU to a vCU */
6380static u8 cu_to_vcu(u32 cu)
6381{
6382 return ilog2(cu);
6383}
6384
6385/* convert a vAU to an AU */
6386static u32 vau_to_au(u8 vau)
6387{
6388 return 8 * (1 << vau);
6389}
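/*
 * Worked examples for the encodings above (values are illustrative):
 * vcu_to_cu(3) == 8 credit units, cu_to_vcu(16) == 4, and
 * vau_to_au(1) == 16 bytes per allocation unit, i.e. AU = 8 * 2^vAU.
 * This is the same encoding referenced in handle_verify_cap(), where a
 * peer vAU of 0 (AU == 8) is bumped to 1 (AU == 16).
 */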
6390
6391static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6392{
6393 ppd->sm_trap_qp = 0x0;
6394 ppd->sa_qp = 0x1;
6395}
6396
6397/*
6398 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6399 */
6400static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6401{
6402 u64 reg;
6403
6404 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6405 write_csr(dd, DC_LCB_CFG_RUN, 0);
6406 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6407 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
Jubin John17fb4f22016-02-14 20:21:52 -08006408 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006409 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6410 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6411 reg = read_csr(dd, DCC_CFG_RESET);
Jubin John17fb4f22016-02-14 20:21:52 -08006412 write_csr(dd, DCC_CFG_RESET, reg |
6413 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6414 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
Jubin John50e5dcb2016-02-14 20:19:41 -08006415 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006416 if (!abort) {
6417 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6418 write_csr(dd, DCC_CFG_RESET, reg);
6419 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6420 }
6421}
6422
6423/*
6424 * This routine should be called after the link has been transitioned to
6425 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6426 * reset).
6427 *
6428 * The expectation is that the caller of this routine would have taken
6429 * care of properly transitioning the link into the correct state.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006430 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6431 * before calling this function.
Mike Marciniszyn77241052015-07-30 15:17:43 -04006432 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006433static void _dc_shutdown(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006434{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006435 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006436
Tadeusz Struk22546b72017-04-28 10:40:02 -07006437 if (dd->dc_shutdown)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006438 return;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006439
Mike Marciniszyn77241052015-07-30 15:17:43 -04006440 dd->dc_shutdown = 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006441 /* Shutdown the LCB */
6442 lcb_shutdown(dd, 1);
Jubin John4d114fd2016-02-14 20:21:43 -08006443 /*
 6444	 * Going to OFFLINE would have caused the 8051 to put the
Mike Marciniszyn77241052015-07-30 15:17:43 -04006445	 * SerDes into reset already. Just need to shut down the 8051
Jubin John4d114fd2016-02-14 20:21:43 -08006446	 * itself.
6447 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006448 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6449}
6450
Tadeusz Struk22546b72017-04-28 10:40:02 -07006451static void dc_shutdown(struct hfi1_devdata *dd)
6452{
6453 mutex_lock(&dd->dc8051_lock);
6454 _dc_shutdown(dd);
6455 mutex_unlock(&dd->dc8051_lock);
6456}
6457
Jubin John4d114fd2016-02-14 20:21:43 -08006458/*
6459 * Calling this after the DC has been brought out of reset should not
6460 * do any damage.
Tadeusz Struk22546b72017-04-28 10:40:02 -07006461 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6462 * before calling this function.
Jubin John4d114fd2016-02-14 20:21:43 -08006463 */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006464static void _dc_start(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04006465{
Tadeusz Struk22546b72017-04-28 10:40:02 -07006466 lockdep_assert_held(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006467
Mike Marciniszyn77241052015-07-30 15:17:43 -04006468 if (!dd->dc_shutdown)
Tadeusz Struk22546b72017-04-28 10:40:02 -07006469 return;
6470
Mike Marciniszyn77241052015-07-30 15:17:43 -04006471 /* Take the 8051 out of reset */
6472 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6473 /* Wait until 8051 is ready */
Tadeusz Struk22546b72017-04-28 10:40:02 -07006474 if (wait_fm_ready(dd, TIMEOUT_8051_START))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006475 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
Jubin John17fb4f22016-02-14 20:21:52 -08006476 __func__);
Tadeusz Struk22546b72017-04-28 10:40:02 -07006477
Mike Marciniszyn77241052015-07-30 15:17:43 -04006478 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6479 write_csr(dd, DCC_CFG_RESET, 0x10);
6480 /* lcb_shutdown() with abort=1 does not restore these */
6481 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006482 dd->dc_shutdown = 0;
Tadeusz Struk22546b72017-04-28 10:40:02 -07006483}
6484
6485static void dc_start(struct hfi1_devdata *dd)
6486{
6487 mutex_lock(&dd->dc8051_lock);
6488 _dc_start(dd);
6489 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006490}
6491
6492/*
6493 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6494 */
6495static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6496{
6497 u64 rx_radr, tx_radr;
6498 u32 version;
6499
6500 if (dd->icode != ICODE_FPGA_EMULATION)
6501 return;
6502
6503 /*
6504 * These LCB defaults on emulator _s are good, nothing to do here:
6505 * LCB_CFG_TX_FIFOS_RADR
6506 * LCB_CFG_RX_FIFOS_RADR
6507 * LCB_CFG_LN_DCLK
6508 * LCB_CFG_IGNORE_LOST_RCLK
6509 */
6510 if (is_emulator_s(dd))
6511 return;
6512 /* else this is _p */
6513
6514 version = emulator_rev(dd);
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006515 if (!is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04006516 version = 0x2d; /* all B0 use 0x2d or higher settings */
6517
6518 if (version <= 0x12) {
6519 /* release 0x12 and below */
6520
6521 /*
6522 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6523 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6524 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6525 */
6526 rx_radr =
6527 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6528 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6529 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6530 /*
6531 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6532 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6533 */
6534 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6535 } else if (version <= 0x18) {
6536 /* release 0x13 up to 0x18 */
6537 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6538 rx_radr =
6539 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6540 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6541 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6542 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6543 } else if (version == 0x19) {
6544 /* release 0x19 */
6545 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6546 rx_radr =
6547 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6548 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6549 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6550 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6551 } else if (version == 0x1a) {
6552 /* release 0x1a */
6553 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6554 rx_radr =
6555 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6556 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6557 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6558 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6559 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6560 } else {
6561 /* release 0x1b and higher */
6562 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6563 rx_radr =
6564 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6565 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6566 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6567 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6568 }
6569
6570 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6571 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6572 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
Jubin John17fb4f22016-02-14 20:21:52 -08006573 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006574 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6575}
6576
6577/*
6578 * Handle a SMA idle message
6579 *
6580 * This is a work-queue function outside of the interrupt.
6581 */
6582void handle_sma_message(struct work_struct *work)
6583{
6584 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6585 sma_message_work);
6586 struct hfi1_devdata *dd = ppd->dd;
6587 u64 msg;
6588 int ret;
6589
Jubin John4d114fd2016-02-14 20:21:43 -08006590 /*
6591 * msg is bytes 1-4 of the 40-bit idle message - the command code
6592 * is stripped off
6593 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006594 ret = read_idle_sma(dd, &msg);
6595 if (ret)
6596 return;
6597 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6598 /*
6599 * React to the SMA message. Byte[1] (0 for us) is the command.
6600 */
6601 switch (msg & 0xff) {
6602 case SMA_IDLE_ARM:
6603 /*
6604 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6605 * State Transitions
6606 *
6607 * Only expected in INIT or ARMED, discard otherwise.
6608 */
6609 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6610 ppd->neighbor_normal = 1;
6611 break;
6612 case SMA_IDLE_ACTIVE:
6613 /*
6614 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6615 * State Transitions
6616 *
6617 * Can activate the node. Discard otherwise.
6618 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08006619 if (ppd->host_link_state == HLS_UP_ARMED &&
6620 ppd->is_active_optimize_enabled) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006621 ppd->neighbor_normal = 1;
6622 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6623 if (ret)
6624 dd_dev_err(
6625 dd,
6626 "%s: received Active SMA idle message, couldn't set link to Active\n",
6627 __func__);
6628 }
6629 break;
6630 default:
6631 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006632 "%s: received unexpected SMA idle message 0x%llx\n",
6633 __func__, msg);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006634 break;
6635 }
6636}
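/*
 * Illustrative decode of the idle message handling above: the low byte
 * of msg selects the command, so an SMA_IDLE_ACTIVE command received
 * while the port is HLS_UP_ARMED with is_active_optimize_enabled set
 * moves the link to HLS_UP_ACTIVE; the same command in any other state
 * is discarded.
 */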
6637
6638static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6639{
6640 u64 rcvctrl;
6641 unsigned long flags;
6642
6643 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6644 rcvctrl = read_csr(dd, RCV_CTRL);
6645 rcvctrl |= add;
6646 rcvctrl &= ~clear;
6647 write_csr(dd, RCV_CTRL, rcvctrl);
6648 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6649}
6650
6651static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6652{
6653 adjust_rcvctrl(dd, add, 0);
6654}
6655
6656static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6657{
6658 adjust_rcvctrl(dd, 0, clear);
6659}
6660
6661/*
6662 * Called from all interrupt handlers to start handling an SPC freeze.
6663 */
6664void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6665{
6666 struct hfi1_devdata *dd = ppd->dd;
6667 struct send_context *sc;
6668 int i;
6669
6670 if (flags & FREEZE_SELF)
6671 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6672
6673 /* enter frozen mode */
6674 dd->flags |= HFI1_FROZEN;
6675
6676 /* notify all SDMA engines that they are going into a freeze */
6677 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6678
6679 /* do halt pre-handling on all enabled send contexts */
6680 for (i = 0; i < dd->num_send_contexts; i++) {
6681 sc = dd->send_contexts[i].sc;
6682 if (sc && (sc->flags & SCF_ENABLED))
6683 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6684 }
6685
6686 /* Send context are frozen. Notify user space */
6687 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6688
6689 if (flags & FREEZE_ABORT) {
6690 dd_dev_err(dd,
6691 "Aborted freeze recovery. Please REBOOT system\n");
6692 return;
6693 }
6694 /* queue non-interrupt handler */
6695 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6696}
6697
6698/*
6699 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6700 * depending on the "freeze" parameter.
6701 *
6702 * No need to return an error if it times out, our only option
6703 * is to proceed anyway.
6704 */
6705static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6706{
6707 unsigned long timeout;
6708 u64 reg;
6709
6710 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6711 while (1) {
6712 reg = read_csr(dd, CCE_STATUS);
6713 if (freeze) {
6714 /* waiting until all indicators are set */
6715 if ((reg & ALL_FROZE) == ALL_FROZE)
6716 return; /* all done */
6717 } else {
6718 /* waiting until all indicators are clear */
6719 if ((reg & ALL_FROZE) == 0)
6720 return; /* all done */
6721 }
6722
6723 if (time_after(jiffies, timeout)) {
6724 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006725 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6726 freeze ? "" : "un", reg & ALL_FROZE,
6727 freeze ? ALL_FROZE : 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006728 return;
6729 }
6730 usleep_range(80, 120);
6731 }
6732}
6733
6734/*
6735 * Do all freeze handling for the RXE block.
6736 */
6737static void rxe_freeze(struct hfi1_devdata *dd)
6738{
6739 int i;
6740
6741 /* disable port */
6742 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6743
6744 /* disable all receive contexts */
6745 for (i = 0; i < dd->num_rcv_contexts; i++)
6746 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6747}
6748
6749/*
6750 * Unfreeze handling for the RXE block - kernel contexts only.
6751 * This will also enable the port. User contexts will do unfreeze
6752 * handling on a per-context basis as they call into the driver.
6753 *
6754 */
6755static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6756{
Mitko Haralanov566c1572016-02-03 14:32:49 -08006757 u32 rcvmask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006758 int i;
6759
6760 /* enable all kernel contexts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07006761 for (i = 0; i < dd->num_rcv_contexts; i++) {
6762 struct hfi1_ctxtdata *rcd = dd->rcd[i];
6763
6764 /* Ensure all non-user contexts(including vnic) are enabled */
6765 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
6766 continue;
6767
Mitko Haralanov566c1572016-02-03 14:32:49 -08006768 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6769 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6770 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6771 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6772 hfi1_rcvctrl(dd, rcvmask, i);
6773 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04006774
6775 /* enable port */
6776 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6777}
6778
6779/*
6780 * Non-interrupt SPC freeze handling.
6781 *
6782 * This is a work-queue function outside of the triggering interrupt.
6783 */
6784void handle_freeze(struct work_struct *work)
6785{
6786 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6787 freeze_work);
6788 struct hfi1_devdata *dd = ppd->dd;
6789
6790 /* wait for freeze indicators on all affected blocks */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006791 wait_for_freeze_status(dd, 1);
6792
6793 /* SPC is now frozen */
6794
6795 /* do send PIO freeze steps */
6796 pio_freeze(dd);
6797
6798 /* do send DMA freeze steps */
6799 sdma_freeze(dd);
6800
6801 /* do send egress freeze steps - nothing to do */
6802
6803 /* do receive freeze steps */
6804 rxe_freeze(dd);
6805
6806 /*
6807 * Unfreeze the hardware - clear the freeze, wait for each
6808 * block's frozen bit to clear, then clear the frozen flag.
6809 */
6810 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6811 wait_for_freeze_status(dd, 0);
6812
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05006813 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04006814 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6815 wait_for_freeze_status(dd, 1);
6816 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6817 wait_for_freeze_status(dd, 0);
6818 }
6819
6820 /* do send PIO unfreeze steps for kernel contexts */
6821 pio_kernel_unfreeze(dd);
6822
6823 /* do send DMA unfreeze steps */
6824 sdma_unfreeze(dd);
6825
6826 /* do send egress unfreeze steps - nothing to do */
6827
6828 /* do receive unfreeze steps for kernel contexts */
6829 rxe_kernel_unfreeze(dd);
6830
6831 /*
6832 * The unfreeze procedure touches global device registers when
6833 * it disables and re-enables RXE. Mark the device unfrozen
6834 * after all that is done so other parts of the driver waiting
6835 * for the device to unfreeze don't do things out of order.
6836 *
6837 * The above implies that the meaning of HFI1_FROZEN flag is
6838 * "Device has gone into freeze mode and freeze mode handling
6839 * is still in progress."
6840 *
6841 * The flag will be removed when freeze mode processing has
6842 * completed.
6843 */
6844 dd->flags &= ~HFI1_FROZEN;
6845 wake_up(&dd->event_queue);
6846
6847 /* no longer frozen */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006848}
6849
6850/*
6851 * Handle a link up interrupt from the 8051.
6852 *
6853 * This is a work-queue function outside of the interrupt.
6854 */
6855void handle_link_up(struct work_struct *work)
6856{
6857 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Jubin John17fb4f22016-02-14 20:21:52 -08006858 link_up_work);
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006859 struct hfi1_devdata *dd = ppd->dd;
6860
Mike Marciniszyn77241052015-07-30 15:17:43 -04006861 set_link_state(ppd, HLS_UP_INIT);
6862
6863 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006864 read_ltp_rtt(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006865 /*
6866 * OPA specifies that certain counters are cleared on a transition
6867 * to link up, so do that.
6868 */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006869 clear_linkup_counters(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006870 /*
6871 * And (re)set link up default values.
6872 */
6873 set_linkup_defaults(ppd);
6874
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006875 /*
6876 * Set VL15 credits. Use cached value from verify cap interrupt.
6877 * In case of quick linkup or simulator, vl15 value will be set by
6878 * handle_linkup_change. VerifyCap interrupt handler will not be
6879 * called in those scenarios.
6880 */
6881 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6882 set_up_vl15(dd, dd->vl15buf_cached);
6883
Mike Marciniszyn77241052015-07-30 15:17:43 -04006884 /* enforce link speed enabled */
6885 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6886 /* oops - current speed is not enabled, bounce */
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07006887 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08006888 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6889 ppd->link_speed_active, ppd->link_speed_enabled);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006890 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08006891 OPA_LINKDOWN_REASON_SPEED_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04006892 set_link_state(ppd, HLS_DN_OFFLINE);
6893 start_link(ppd);
6894 }
6895}
6896
Jubin John4d114fd2016-02-14 20:21:43 -08006897/*
6898 * Several pieces of LNI information were cached for SMA in ppd.
6899 * Reset these on link down
6900 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04006901static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6902{
6903 ppd->neighbor_guid = 0;
6904 ppd->neighbor_port_number = 0;
6905 ppd->neighbor_type = 0;
6906 ppd->neighbor_fm_security = 0;
6907}
6908
Dean Luickfeb831d2016-04-14 08:31:36 -07006909static const char * const link_down_reason_strs[] = {
6910 [OPA_LINKDOWN_REASON_NONE] = "None",
Dennis Dalessandro67838e62017-05-29 17:18:46 -07006911 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
Dean Luickfeb831d2016-04-14 08:31:36 -07006912 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6913 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6914 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6915 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6916 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6917 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6918 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6919 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6920 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6921 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6922 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6923 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6924 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6925 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6926 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6927 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6928 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6929 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6930 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6931 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6932 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6933 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6934 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6935 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6936 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6937 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6938 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6939 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6940 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6941 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6942 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6943 "Excessive buffer overrun",
6944 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6945 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6946 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6947 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6948 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6949 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6950 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6951 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6952 "Local media not installed",
6953 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6954 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6955 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6956 "End to end not installed",
6957 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6958 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6959 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6960 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6961 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6962 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6963};
6964
6965/* return the neighbor link down reason string */
6966static const char *link_down_reason_str(u8 reason)
6967{
6968 const char *str = NULL;
6969
6970 if (reason < ARRAY_SIZE(link_down_reason_strs))
6971 str = link_down_reason_strs[reason];
6972 if (!str)
6973 str = "(invalid)";
6974
6975 return str;
6976}
6977
Mike Marciniszyn77241052015-07-30 15:17:43 -04006978/*
6979 * Handle a link down interrupt from the 8051.
6980 *
6981 * This is a work-queue function outside of the interrupt.
6982 */
6983void handle_link_down(struct work_struct *work)
6984{
6985 u8 lcl_reason, neigh_reason = 0;
Dean Luickfeb831d2016-04-14 08:31:36 -07006986 u8 link_down_reason;
Mike Marciniszyn77241052015-07-30 15:17:43 -04006987 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
Dean Luickfeb831d2016-04-14 08:31:36 -07006988 link_down_work);
6989 int was_up;
6990 static const char ldr_str[] = "Link down reason: ";
Mike Marciniszyn77241052015-07-30 15:17:43 -04006991
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08006992 if ((ppd->host_link_state &
6993 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6994 ppd->port_type == PORT_TYPE_FIXED)
6995 ppd->offline_disabled_reason =
6996 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6997
6998 /* Go offline first, then deal with reading/writing through 8051 */
Dean Luickfeb831d2016-04-14 08:31:36 -07006999 was_up = !!(ppd->host_link_state & HLS_UP);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007000 set_link_state(ppd, HLS_DN_OFFLINE);
7001
Dean Luickfeb831d2016-04-14 08:31:36 -07007002 if (was_up) {
7003 lcl_reason = 0;
7004 /* link down reason is only valid if the link was up */
7005 read_link_down_reason(ppd->dd, &link_down_reason);
7006 switch (link_down_reason) {
7007 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7008 /* the link went down, no idle message reason */
7009 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7010 ldr_str);
7011 break;
7012 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7013 /*
7014 * The neighbor reason is only valid if an idle message
7015 * was received for it.
7016 */
7017 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7018 dd_dev_info(ppd->dd,
7019 "%sNeighbor link down message %d, %s\n",
7020 ldr_str, neigh_reason,
7021 link_down_reason_str(neigh_reason));
7022 break;
7023 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7024 dd_dev_info(ppd->dd,
7025 "%sHost requested link to go offline\n",
7026 ldr_str);
7027 break;
7028 default:
7029 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7030 ldr_str, link_down_reason);
7031 break;
7032 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007033
Dean Luickfeb831d2016-04-14 08:31:36 -07007034 /*
7035 * If no reason, assume peer-initiated but missed
7036 * LinkGoingDown idle flits.
7037 */
7038 if (neigh_reason == 0)
7039 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7040 } else {
7041 /* went down while polling or going up */
7042 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7043 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007044
7045 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7046
Dean Luick015e91f2016-04-14 08:31:42 -07007047 /* inform the SMA when the link transitions from up to down */
7048 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7049 ppd->neigh_link_down_reason.sma == 0) {
7050 ppd->local_link_down_reason.sma =
7051 ppd->local_link_down_reason.latest;
7052 ppd->neigh_link_down_reason.sma =
7053 ppd->neigh_link_down_reason.latest;
7054 }
7055
Mike Marciniszyn77241052015-07-30 15:17:43 -04007056 reset_neighbor_info(ppd);
7057
7058 /* disable the port */
7059 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7060
Jubin John4d114fd2016-02-14 20:21:43 -08007061 /*
7062 * If there is no cable attached, turn the DC off. Otherwise,
7063 * start the link bring up.
7064 */
Dean Luick0db9dec2016-09-06 04:35:20 -07007065 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
Mike Marciniszyn77241052015-07-30 15:17:43 -04007066 dc_shutdown(ppd->dd);
Dean Luick0db9dec2016-09-06 04:35:20 -07007067 else
Mike Marciniszyn77241052015-07-30 15:17:43 -04007068 start_link(ppd);
7069}
7070
7071void handle_link_bounce(struct work_struct *work)
7072{
7073 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7074 link_bounce_work);
7075
7076 /*
7077 * Only do something if the link is currently up.
7078 */
7079 if (ppd->host_link_state & HLS_UP) {
7080 set_link_state(ppd, HLS_DN_OFFLINE);
7081 start_link(ppd);
7082 } else {
7083 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007084 __func__, link_state_name(ppd->host_link_state));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007085 }
7086}
7087
7088/*
7089 * Mask conversion: Capability exchange to Port LTP. The capability
7090 * exchange has an implicit 16b CRC that is mandatory.
7091 */
7092static int cap_to_port_ltp(int cap)
7093{
7094 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7095
7096 if (cap & CAP_CRC_14B)
7097 port_ltp |= PORT_LTP_CRC_MODE_14;
7098 if (cap & CAP_CRC_48B)
7099 port_ltp |= PORT_LTP_CRC_MODE_48;
7100 if (cap & CAP_CRC_12B_16B_PER_LANE)
7101 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7102
7103 return port_ltp;
7104}
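/*
 * Worked example (illustrative): cap_to_port_ltp(CAP_CRC_14B | CAP_CRC_48B)
 * yields PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * since the 16b mode is always included.  port_ltp_to_cap() below inverts
 * the mapping but never reports the mandatory 16b mode as a capability.
 */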
7105
7106/*
7107 * Convert an OPA Port LTP mask to capability mask
7108 */
7109int port_ltp_to_cap(int port_ltp)
7110{
7111 int cap_mask = 0;
7112
7113 if (port_ltp & PORT_LTP_CRC_MODE_14)
7114 cap_mask |= CAP_CRC_14B;
7115 if (port_ltp & PORT_LTP_CRC_MODE_48)
7116 cap_mask |= CAP_CRC_48B;
7117 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7118 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7119
7120 return cap_mask;
7121}
7122
7123/*
7124 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7125 */
7126static int lcb_to_port_ltp(int lcb_crc)
7127{
7128 int port_ltp = 0;
7129
7130 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7131 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7132 else if (lcb_crc == LCB_CRC_48B)
7133 port_ltp = PORT_LTP_CRC_MODE_48;
7134 else if (lcb_crc == LCB_CRC_14B)
7135 port_ltp = PORT_LTP_CRC_MODE_14;
7136 else
7137 port_ltp = PORT_LTP_CRC_MODE_16;
7138
7139 return port_ltp;
7140}
7141
7142/*
7143 * Our neighbor has indicated that we are allowed to act as a fabric
 7144 * manager, so place the full management partition key in pkey array
 7145 * index 2 (see OPAv1, section 20.2.2.6.8). Note
7146 * that we should already have the limited management partition key in
7147 * array element 1, and also that the port is not yet up when
7148 * add_full_mgmt_pkey() is invoked.
7149 */
7150static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7151{
7152 struct hfi1_devdata *dd = ppd->dd;
7153
Dennis Dalessandroa498fbc2017-04-09 10:17:06 -07007154 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
Dean Luick87645222015-12-01 15:38:21 -05007155 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7156 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7157 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007158 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7159 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007160 hfi1_event_pkey_change(ppd->dd, ppd->port);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007161}
7162
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007163static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007164{
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007165 if (ppd->pkeys[2] != 0) {
7166 ppd->pkeys[2] = 0;
7167 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
Sebastian Sanchez34d351f2016-06-09 07:52:03 -07007168 hfi1_event_pkey_change(ppd->dd, ppd->port);
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07007169 }
Sebastian Sanchezce8b2fd2016-05-24 12:50:47 -07007170}
7171
Mike Marciniszyn77241052015-07-30 15:17:43 -04007172/*
7173 * Convert the given link width to the OPA link width bitmask.
7174 */
7175static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7176{
7177 switch (width) {
7178 case 0:
7179 /*
7180 * Simulator and quick linkup do not set the width.
7181 * Just set it to 4x without complaint.
7182 */
7183 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7184 return OPA_LINK_WIDTH_4X;
7185 return 0; /* no lanes up */
7186 case 1: return OPA_LINK_WIDTH_1X;
7187 case 2: return OPA_LINK_WIDTH_2X;
7188 case 3: return OPA_LINK_WIDTH_3X;
7189 default:
7190 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007191 __func__, width);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007192 /* fall through */
7193 case 4: return OPA_LINK_WIDTH_4X;
7194 }
7195}
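/*
 * Illustrative cases for the conversion above: a hardware width of 2
 * maps to OPA_LINK_WIDTH_2X; a width of 0 maps to OPA_LINK_WIDTH_4X
 * only for the functional simulator or quick linkup, and otherwise
 * means no lanes are up.
 */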
7196
7197/*
7198 * Do a population count on the bottom nibble.
7199 */
7200static const u8 bit_counts[16] = {
7201 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7202};
Jubin Johnf4d507c2016-02-14 20:20:25 -08007203
Mike Marciniszyn77241052015-07-30 15:17:43 -04007204static inline u8 nibble_to_count(u8 nibble)
7205{
7206 return bit_counts[nibble & 0xf];
7207}
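/*
 * Worked example: nibble_to_count(0xb) looks up bit_counts[0xb] and
 * returns 3, since binary 1011 has three bits set; applied to
 * enable_lane_tx/enable_lane_rx below, this gives the active lane count.
 */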
7208
7209/*
7210 * Read the active lane information from the 8051 registers and return
7211 * their widths.
7212 *
7213 * Active lane information is found in these 8051 registers:
7214 * enable_lane_tx
7215 * enable_lane_rx
7216 */
7217static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7218 u16 *rx_width)
7219{
7220 u16 tx, rx;
7221 u8 enable_lane_rx;
7222 u8 enable_lane_tx;
7223 u8 tx_polarity_inversion;
7224 u8 rx_polarity_inversion;
7225 u8 max_rate;
7226
7227 /* read the active lanes */
7228 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08007229 &rx_polarity_inversion, &max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007230 read_local_lni(dd, &enable_lane_rx);
7231
7232 /* convert to counts */
7233 tx = nibble_to_count(enable_lane_tx);
7234 rx = nibble_to_count(enable_lane_rx);
7235
7236 /*
7237 * Set link_speed_active here, overriding what was set in
7238 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7239 * set the max_rate field in handle_verify_cap until v0.19.
7240 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007241 if ((dd->icode == ICODE_RTL_SILICON) &&
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007242 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007243 /* max_rate: 0 = 12.5G, 1 = 25G */
7244 switch (max_rate) {
7245 case 0:
7246 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7247 break;
7248 default:
7249 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007250 "%s: unexpected max rate %d, using 25Gb\n",
7251 __func__, (int)max_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007252 /* fall through */
7253 case 1:
7254 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7255 break;
7256 }
7257 }
7258
7259 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007260 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7261 enable_lane_tx, tx, enable_lane_rx, rx);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007262 *tx_width = link_width_to_bits(dd, tx);
7263 *rx_width = link_width_to_bits(dd, rx);
7264}
7265
7266/*
7267 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7268 * Valid after the end of VerifyCap and during LinkUp. Does not change
7269 * after link up. I.e. look elsewhere for downgrade information.
7270 *
7271 * Bits are:
7272 * + bits [7:4] contain the number of active transmitters
7273 * + bits [3:0] contain the number of active receivers
7274 * These are numbers 1 through 4 and can be different values if the
7275 * link is asymmetric.
7276 *
7277 * verify_cap_local_fm_link_width[0] retains its original value.
7278 */
7279static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7280 u16 *rx_width)
7281{
7282 u16 widths, tx, rx;
7283 u8 misc_bits, local_flags;
7284 u16 active_tx, active_rx;
7285
7286 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7287 tx = widths >> 12;
7288 rx = (widths >> 8) & 0xf;
7289
7290 *tx_width = link_width_to_bits(dd, tx);
7291 *rx_width = link_width_to_bits(dd, rx);
7292
7293 /* print the active widths */
7294 get_link_widths(dd, &active_tx, &active_rx);
7295}
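/*
 * Worked example (register value is illustrative): if
 * verify_cap_local_fm_link_width[1] reads back with widths == 0x4400,
 * then tx == 4 and rx == 4 active lanes, and both *tx_width and
 * *rx_width become OPA_LINK_WIDTH_4X via link_width_to_bits().
 */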
7296
7297/*
7298 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7299 * hardware information when the link first comes up.
7300 *
7301 * The link width is not available until after VerifyCap.AllFramesReceived
7302 * (the trigger for handle_verify_cap), so this is outside that routine
7303 * and should be called when the 8051 signals linkup.
7304 */
7305void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7306{
7307 u16 tx_width, rx_width;
7308
7309 /* get end-of-LNI link widths */
7310 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7311
7312 /* use tx_width as the link is supposed to be symmetric on link up */
7313 ppd->link_width_active = tx_width;
7314 /* link width downgrade active (LWD.A) starts out matching LW.A */
7315 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7316 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7317 /* per OPA spec, on link up LWD.E resets to LWD.S */
7318 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
 7319	/* cache the active egress rate (units [10^6 bits/sec]) */
7320 ppd->current_egress_rate = active_egress_rate(ppd);
7321}
7322
7323/*
7324 * Handle a verify capabilities interrupt from the 8051.
7325 *
7326 * This is a work-queue function outside of the interrupt.
7327 */
7328void handle_verify_cap(struct work_struct *work)
7329{
7330 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7331 link_vc_work);
7332 struct hfi1_devdata *dd = ppd->dd;
7333 u64 reg;
7334 u8 power_management;
 7335	u8 continuous;
7336 u8 vcu;
7337 u8 vau;
7338 u8 z;
7339 u16 vl15buf;
7340 u16 link_widths;
7341 u16 crc_mask;
7342 u16 crc_val;
7343 u16 device_id;
7344 u16 active_tx, active_rx;
7345 u8 partner_supported_crc;
7346 u8 remote_tx_rate;
7347 u8 device_rev;
7348
7349 set_link_state(ppd, HLS_VERIFY_CAP);
7350
7351 lcb_shutdown(dd, 0);
7352 adjust_lcb_for_fpga_serdes(dd);
7353
Mike Marciniszyn77241052015-07-30 15:17:43 -04007354	read_vc_remote_phy(dd, &power_management, &continuous);
Jubin John17fb4f22016-02-14 20:21:52 -08007355 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7356 &partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007357 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7358 read_remote_device_id(dd, &device_id, &device_rev);
7359 /*
7360 * And the 'MgmtAllowed' information, which is exchanged during
 7361	 * LNI, is also available at this point.
7362 */
7363 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7364 /* print the active widths */
7365 get_link_widths(dd, &active_tx, &active_rx);
7366 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007367 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
 7368		    (int)power_management, (int)continuous);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007369 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007370 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7371 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7372 (int)partner_supported_crc);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007373 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007374 (u32)remote_tx_rate, (u32)link_widths);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007375 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007376 (u32)device_id, (u32)device_rev);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007377 /*
7378 * The peer vAU value just read is the peer receiver value. HFI does
7379 * not support a transmit vAU of 0 (AU == 8). We advertised that
7380 * with Z=1 in the fabric capabilities sent to the peer. The peer
7381 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7382 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7383 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7384 * subject to the Z value exception.
7385 */
7386 if (vau == 0)
7387 vau = 1;
Byczkowski, Jakubb3e6b4b2017-05-12 09:01:37 -07007388 set_up_vau(dd, vau);
7389
7390 /*
7391 * Set VL15 credits to 0 in global credit register. Cache remote VL15
 7392	 * credits value and wait for the link-up interrupt to set it.
7393 */
7394 set_up_vl15(dd, 0);
7395 dd->vl15buf_cached = vl15buf;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007396
7397 /* set up the LCB CRC mode */
7398 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7399
7400 /* order is important: use the lowest bit in common */
7401 if (crc_mask & CAP_CRC_14B)
7402 crc_val = LCB_CRC_14B;
7403 else if (crc_mask & CAP_CRC_48B)
7404 crc_val = LCB_CRC_48B;
7405 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7406 crc_val = LCB_CRC_12B_16B_PER_LANE;
7407 else
7408 crc_val = LCB_CRC_16B;
7409
7410 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7411 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7412 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7413
7414 /* set (14b only) or clear sideband credit */
7415 reg = read_csr(dd, SEND_CM_CTRL);
7416 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7417 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007418 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007419 } else {
7420 write_csr(dd, SEND_CM_CTRL,
Jubin John17fb4f22016-02-14 20:21:52 -08007421 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007422 }
7423
7424 ppd->link_speed_active = 0; /* invalid value */
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07007425 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007426 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7427 switch (remote_tx_rate) {
7428 case 0:
7429 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7430 break;
7431 case 1:
7432 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7433 break;
7434 }
7435 } else {
7436 /* actual rate is highest bit of the ANDed rates */
7437 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7438
7439 if (rate & 2)
7440 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7441 else if (rate & 1)
7442 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7443 }
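	/*
	 * In the ANDed-rate case above, bit 0 is 12.5G and bit 1 is 25G;
	 * e.g. local 0x3 & remote 0x2 leaves 0x2, so 25G is selected.
	 */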
7444 if (ppd->link_speed_active == 0) {
7445 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007446 __func__, (int)remote_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007447 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7448 }
7449
7450 /*
7451 * Cache the values of the supported, enabled, and active
7452 * LTP CRC modes to return in 'portinfo' queries. But the bit
7453 * flags that are returned in the portinfo query differ from
7454 * what's in the link_crc_mask, crc_sizes, and crc_val
7455 * variables. Convert these here.
7456 */
7457 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7458 /* supported crc modes */
7459 ppd->port_ltp_crc_mode |=
7460 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7461 /* enabled crc modes */
7462 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7463 /* active crc mode */
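	/* resulting layout: supported << 8, enabled << 4, active in the low bits */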
7464
7465 /* set up the remote credit return table */
7466 assign_remote_cm_au_table(dd, vcu);
7467
7468 /*
7469 * The LCB is reset on entry to handle_verify_cap(), so this must
7470 * be applied on every link up.
7471 *
7472 * Adjust LCB error kill enable to kill the link if
7473 * these RBUF errors are seen:
7474 * REPLAY_BUF_MBE_SMASK
7475 * FLIT_INPUT_BUF_MBE_SMASK
7476 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -05007477 if (is_ax(dd)) { /* fixed in B0 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04007478 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7479 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7480 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7481 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7482 }
7483
7484 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7485 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7486
7487 /* give 8051 access to the LCB CSRs */
7488 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7489 set_8051_lcb_access(dd);
7490
Mike Marciniszyn77241052015-07-30 15:17:43 -04007491 if (ppd->mgmt_allowed)
7492 add_full_mgmt_pkey(ppd);
7493
7494 /* tell the 8051 to go to LinkUp */
7495 set_link_state(ppd, HLS_GOING_UP);
7496}
7497
7498/*
7499 * Apply the link width downgrade enabled policy against the current active
7500 * link widths.
7501 *
7502 * Called when the enabled policy changes or the active link widths change.
7503 */
7504void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7505{
Mike Marciniszyn77241052015-07-30 15:17:43 -04007506 int do_bounce = 0;
Dean Luick323fd782015-11-16 21:59:24 -05007507 int tries;
7508 u16 lwde;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007509 u16 tx, rx;
7510
Dean Luick323fd782015-11-16 21:59:24 -05007511 /* use the hls lock to avoid a race with actual link up */
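	/* the retry loop below waits at most about 1000 * ~100 usec = ~100 msec */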
7512 tries = 0;
7513retry:
Mike Marciniszyn77241052015-07-30 15:17:43 -04007514 mutex_lock(&ppd->hls_lock);
7515 /* only apply if the link is up */
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07007516 if (ppd->host_link_state & HLS_DOWN) {
Dean Luick323fd782015-11-16 21:59:24 -05007517 /* still going up..wait and retry */
7518 if (ppd->host_link_state & HLS_GOING_UP) {
7519 if (++tries < 1000) {
7520 mutex_unlock(&ppd->hls_lock);
7521 usleep_range(100, 120); /* arbitrary */
7522 goto retry;
7523 }
7524 dd_dev_err(ppd->dd,
7525 "%s: giving up waiting for link state change\n",
7526 __func__);
7527 }
7528 goto done;
7529 }
7530
7531 lwde = ppd->link_width_downgrade_enabled;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007532
7533 if (refresh_widths) {
7534 get_link_widths(ppd->dd, &tx, &rx);
7535 ppd->link_width_downgrade_tx_active = tx;
7536 ppd->link_width_downgrade_rx_active = rx;
7537 }
7538
Dean Luickf9b56352016-04-14 08:31:30 -07007539 if (ppd->link_width_downgrade_tx_active == 0 ||
7540 ppd->link_width_downgrade_rx_active == 0) {
7541 /* the 8051 reported a dead link as a downgrade */
7542 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7543 } else if (lwde == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007544 /* downgrade is disabled */
7545
7546 /* bounce if not at starting active width */
7547 if ((ppd->link_width_active !=
Jubin John17fb4f22016-02-14 20:21:52 -08007548 ppd->link_width_downgrade_tx_active) ||
7549 (ppd->link_width_active !=
7550 ppd->link_width_downgrade_rx_active)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007551 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007552 "Link downgrade is disabled and link has downgraded, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007553 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007554 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7555 ppd->link_width_active,
7556 ppd->link_width_downgrade_tx_active,
7557 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007558 do_bounce = 1;
7559 }
Jubin Johnd0d236e2016-02-14 20:20:15 -08007560 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7561 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007562 /* Tx or Rx is outside the enabled policy */
7563 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007564 "Link is outside of downgrade allowed, downing link\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007565 dd_dev_err(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08007566 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7567 lwde, ppd->link_width_downgrade_tx_active,
7568 ppd->link_width_downgrade_rx_active);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007569 do_bounce = 1;
7570 }
7571
Dean Luick323fd782015-11-16 21:59:24 -05007572done:
7573 mutex_unlock(&ppd->hls_lock);
7574
Mike Marciniszyn77241052015-07-30 15:17:43 -04007575 if (do_bounce) {
7576 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08007577 OPA_LINKDOWN_REASON_WIDTH_POLICY);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007578 set_link_state(ppd, HLS_DN_OFFLINE);
7579 start_link(ppd);
7580 }
7581}
7582
7583/*
7584 * Handle a link downgrade interrupt from the 8051.
7585 *
7586 * This is a work-queue function outside of the interrupt.
7587 */
7588void handle_link_downgrade(struct work_struct *work)
7589{
7590 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7591 link_downgrade_work);
7592
7593 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7594 apply_link_downgrade_policy(ppd, 1);
7595}
7596
7597static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7598{
7599 return flag_string(buf, buf_len, flags, dcc_err_flags,
7600 ARRAY_SIZE(dcc_err_flags));
7601}
7602
7603static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7604{
7605 return flag_string(buf, buf_len, flags, lcb_err_flags,
7606 ARRAY_SIZE(lcb_err_flags));
7607}
7608
7609static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7610{
7611 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7612 ARRAY_SIZE(dc8051_err_flags));
7613}
7614
7615static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7616{
7617 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7618 ARRAY_SIZE(dc8051_info_err_flags));
7619}
7620
7621static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7622{
7623 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7624 ARRAY_SIZE(dc8051_info_host_msg_flags));
7625}
7626
7627static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7628{
7629 struct hfi1_pportdata *ppd = dd->pport;
7630 u64 info, err, host_msg;
7631 int queue_link_down = 0;
7632 char buf[96];
7633
7634 /* look at the flags */
7635 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7636 /* 8051 information set by firmware */
7637 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7638 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7639 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7640 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7641 host_msg = (info >>
7642 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7643 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7644
7645 /*
7646 * Handle error flags.
7647 */
7648 if (err & FAILED_LNI) {
7649 /*
7650 * LNI error indications are cleared by the 8051
7651 * only when starting polling. Only pay attention
7652 * to them when in the states that occur during
7653 * LNI.
7654 */
7655 if (ppd->host_link_state
7656 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7657 queue_link_down = 1;
7658 dd_dev_info(dd, "Link error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007659 dc8051_info_err_string(buf,
7660 sizeof(buf),
7661 err &
7662 FAILED_LNI));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007663 }
7664 err &= ~(u64)FAILED_LNI;
7665 }
Dean Luick6d014532015-12-01 15:38:23 -05007666		/* unknown frames can happen during LNI, just count */
7667 if (err & UNKNOWN_FRAME) {
7668 ppd->unknown_frame_count++;
7669 err &= ~(u64)UNKNOWN_FRAME;
7670 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04007671 if (err) {
7672 /* report remaining errors, but do not do anything */
7673 dd_dev_err(dd, "8051 info error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007674 dc8051_info_err_string(buf, sizeof(buf),
7675 err));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007676 }
7677
7678 /*
7679 * Handle host message flags.
7680 */
7681 if (host_msg & HOST_REQ_DONE) {
7682 /*
7683 * Presently, the driver does a busy wait for
7684 * host requests to complete. This is only an
7685 * informational message.
7686 * NOTE: The 8051 clears the host message
7687 * information *on the next 8051 command*.
7688 * Therefore, when linkup is achieved,
7689 * this flag will still be set.
7690 */
7691 host_msg &= ~(u64)HOST_REQ_DONE;
7692 }
7693 if (host_msg & BC_SMA_MSG) {
7694 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7695 host_msg &= ~(u64)BC_SMA_MSG;
7696 }
7697 if (host_msg & LINKUP_ACHIEVED) {
7698 dd_dev_info(dd, "8051: Link up\n");
7699 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7700 host_msg &= ~(u64)LINKUP_ACHIEVED;
7701 }
7702 if (host_msg & EXT_DEVICE_CFG_REQ) {
Easwar Hariharan145dd2b2016-04-12 11:25:31 -07007703 handle_8051_request(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007704 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7705 }
7706 if (host_msg & VERIFY_CAP_FRAME) {
7707 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7708 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7709 }
7710 if (host_msg & LINK_GOING_DOWN) {
7711 const char *extra = "";
7712 /* no downgrade action needed if going down */
7713 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7714 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7715 extra = " (ignoring downgrade)";
7716 }
7717 dd_dev_info(dd, "8051: Link down%s\n", extra);
7718 queue_link_down = 1;
7719 host_msg &= ~(u64)LINK_GOING_DOWN;
7720 }
7721 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7722 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7723 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7724 }
7725 if (host_msg) {
7726 /* report remaining messages, but do not do anything */
7727 dd_dev_info(dd, "8051 info host message: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007728 dc8051_info_host_msg_string(buf,
7729 sizeof(buf),
7730 host_msg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007731 }
7732
7733 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7734 }
7735 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7736 /*
7737 * Lost the 8051 heartbeat. If this happens, we
7738 * receive constant interrupts about it. Disable
7739 * the interrupt after the first.
7740 */
7741 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7742 write_csr(dd, DC_DC8051_ERR_EN,
Jubin John17fb4f22016-02-14 20:21:52 -08007743 read_csr(dd, DC_DC8051_ERR_EN) &
7744 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007745
7746 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7747 }
7748 if (reg) {
7749 /* report the error, but do not do anything */
7750 dd_dev_err(dd, "8051 error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007751 dc8051_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007752 }
7753
7754 if (queue_link_down) {
Jubin John4d114fd2016-02-14 20:21:43 -08007755 /*
7756 * if the link is already going down or disabled, do not
7757 * queue another
7758 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08007759 if ((ppd->host_link_state &
7760 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7761 ppd->link_enabled == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04007762 dd_dev_info(dd, "%s: not queuing link down\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007763 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007764 } else {
7765 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7766 }
7767 }
7768}
7769
7770static const char * const fm_config_txt[] = {
7771[0] =
7772 "BadHeadDist: Distance violation between two head flits",
7773[1] =
7774 "BadTailDist: Distance violation between two tail flits",
7775[2] =
7776 "BadCtrlDist: Distance violation between two credit control flits",
7777[3] =
7778 "BadCrdAck: Credits return for unsupported VL",
7779[4] =
7780 "UnsupportedVLMarker: Received VL Marker",
7781[5] =
7782 "BadPreempt: Exceeded the preemption nesting level",
7783[6] =
7784 "BadControlFlit: Received unsupported control flit",
7785/* no 7 */
7786[8] =
7787 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7788};
7789
7790static const char * const port_rcv_txt[] = {
7791[1] =
7792 "BadPktLen: Illegal PktLen",
7793[2] =
7794 "PktLenTooLong: Packet longer than PktLen",
7795[3] =
7796 "PktLenTooShort: Packet shorter than PktLen",
7797[4] =
7798 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7799[5] =
7800 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7801[6] =
7802 "BadL2: Illegal L2 opcode",
7803[7] =
7804 "BadSC: Unsupported SC",
7805[9] =
7806 "BadRC: Illegal RC",
7807[11] =
7808 "PreemptError: Preempting with same VL",
7809[12] =
7810 "PreemptVL15: Preempting a VL15 packet",
7811};
7812
7813#define OPA_LDR_FMCONFIG_OFFSET 16
7814#define OPA_LDR_PORTRCV_OFFSET 0
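/*
 * ppd->port_error_action is a bit mask of the error codes that should force
 * a link bounce: fmconfig error code N maps to bit (OPA_LDR_FMCONFIG_OFFSET + N)
 * and port receive error code N maps to bit (OPA_LDR_PORTRCV_OFFSET + N), as
 * used in handle_dcc_err() below.
 */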
7815static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7816{
7817 u64 info, hdr0, hdr1;
7818 const char *extra;
7819 char buf[96];
7820 struct hfi1_pportdata *ppd = dd->pport;
7821 u8 lcl_reason = 0;
7822 int do_bounce = 0;
7823
7824 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7825 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7826 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7827 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7828 /* set status bit */
7829 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7830 }
7831 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7832 }
7833
7834 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7835 struct hfi1_pportdata *ppd = dd->pport;
7836 /* this counter saturates at (2^32) - 1 */
7837 if (ppd->link_downed < (u32)UINT_MAX)
7838 ppd->link_downed++;
7839 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7840 }
7841
7842 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7843 u8 reason_valid = 1;
7844
7845 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7846 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7847 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7848 /* set status bit */
7849 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7850 }
7851 switch (info) {
7852 case 0:
7853 case 1:
7854 case 2:
7855 case 3:
7856 case 4:
7857 case 5:
7858 case 6:
7859 extra = fm_config_txt[info];
7860 break;
7861 case 8:
7862 extra = fm_config_txt[info];
7863 if (ppd->port_error_action &
7864 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7865 do_bounce = 1;
7866 /*
7867 * lcl_reason cannot be derived from info
7868 * for this error
7869 */
7870 lcl_reason =
7871 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7872 }
7873 break;
7874 default:
7875 reason_valid = 0;
7876 snprintf(buf, sizeof(buf), "reserved%lld", info);
7877 extra = buf;
7878 break;
7879 }
7880
7881 if (reason_valid && !do_bounce) {
7882 do_bounce = ppd->port_error_action &
7883 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7884 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7885 }
7886
7887 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007888 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7889 extra);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007890 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7891 }
7892
7893 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7894 u8 reason_valid = 1;
7895
7896 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7897 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7898 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7899 if (!(dd->err_info_rcvport.status_and_code &
7900 OPA_EI_STATUS_SMASK)) {
7901 dd->err_info_rcvport.status_and_code =
7902 info & OPA_EI_CODE_SMASK;
7903 /* set status bit */
7904 dd->err_info_rcvport.status_and_code |=
7905 OPA_EI_STATUS_SMASK;
Jubin John4d114fd2016-02-14 20:21:43 -08007906 /*
7907 * save first 2 flits in the packet that caused
7908 * the error
7909 */
Bart Van Assche48a0cc132016-06-03 12:09:56 -07007910 dd->err_info_rcvport.packet_flit1 = hdr0;
7911 dd->err_info_rcvport.packet_flit2 = hdr1;
Mike Marciniszyn77241052015-07-30 15:17:43 -04007912 }
7913 switch (info) {
7914 case 1:
7915 case 2:
7916 case 3:
7917 case 4:
7918 case 5:
7919 case 6:
7920 case 7:
7921 case 9:
7922 case 11:
7923 case 12:
7924 extra = port_rcv_txt[info];
7925 break;
7926 default:
7927 reason_valid = 0;
7928 snprintf(buf, sizeof(buf), "reserved%lld", info);
7929 extra = buf;
7930 break;
7931 }
7932
7933 if (reason_valid && !do_bounce) {
7934 do_bounce = ppd->port_error_action &
7935 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7936 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7937 }
7938
7939 /* just report this */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007940 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7941 " hdr0 0x%llx, hdr1 0x%llx\n",
7942 extra, hdr0, hdr1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007943
7944 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7945 }
7946
7947 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7948 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007949 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007950 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7951 }
7952 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7953 /* informative only */
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007954 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04007955 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7956 }
7957
Don Hiatt243d9f42017-03-20 17:26:20 -07007958 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
7959 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
7960
Mike Marciniszyn77241052015-07-30 15:17:43 -04007961 /* report any remaining errors */
7962 if (reg)
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007963 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
7964 dcc_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007965
7966 if (lcl_reason == 0)
7967 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7968
7969 if (do_bounce) {
Jakub Byczkowskic27aad02017-02-08 05:27:55 -08007970 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
7971 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04007972 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7973 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7974 }
7975}
7976
7977static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7978{
7979 char buf[96];
7980
7981 dd_dev_info(dd, "LCB Error: %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -08007982 lcb_err_string(buf, sizeof(buf), reg));
Mike Marciniszyn77241052015-07-30 15:17:43 -04007983}
7984
7985/*
7986 * CCE block DC interrupt. Source is < 8.
7987 */
7988static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7989{
7990 const struct err_reg_info *eri = &dc_errs[source];
7991
7992 if (eri->handler) {
7993 interrupt_clear_down(dd, 0, eri);
7994 } else if (source == 3 /* dc_lbm_int */) {
7995 /*
7996 * This indicates that a parity error has occurred on the
7997 * address/control lines presented to the LBM. The error
7998 * is a single pulse, there is no associated error flag,
7999 * and it is non-maskable. This is because if a parity
 8000	 * error occurs on the request, the request is dropped.
8001 * This should never occur, but it is nice to know if it
8002 * ever does.
8003 */
8004 dd_dev_err(dd, "Parity error in DC LBM block\n");
8005 } else {
8006 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8007 }
8008}
8009
8010/*
8011 * TX block send credit interrupt. Source is < 160.
8012 */
8013static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8014{
8015 sc_group_release_update(dd, source);
8016}
8017
8018/*
8019 * TX block SDMA interrupt. Source is < 48.
8020 *
8021 * SDMA interrupts are grouped by type:
8022 *
8023 * 0 - N-1 = SDma
8024 * N - 2N-1 = SDmaProgress
8025 * 2N - 3N-1 = SDmaIdle
8026 */
8027static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8028{
8029 /* what interrupt */
8030 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8031 /* which engine */
8032 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
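	/* e.g. source == TXE_NUM_SDMA_ENGINES + 3 is "what" 1 (SDmaProgress) for engine 3 */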
8033
8034#ifdef CONFIG_SDMA_VERBOSITY
8035 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8036 slashstrip(__FILE__), __LINE__, __func__);
8037 sdma_dumpstate(&dd->per_sdma[which]);
8038#endif
8039
8040 if (likely(what < 3 && which < dd->num_sdma)) {
8041 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8042 } else {
8043 /* should not happen */
8044 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8045 }
8046}
8047
8048/*
8049 * RX block receive available interrupt. Source is < 160.
8050 */
8051static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8052{
8053 struct hfi1_ctxtdata *rcd;
8054 char *err_detail;
8055
8056 if (likely(source < dd->num_rcv_contexts)) {
8057 rcd = dd->rcd[source];
8058 if (rcd) {
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008059 /* Check for non-user contexts, including vnic */
8060 if ((source < dd->first_dyn_alloc_ctxt) ||
8061 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
Dean Luickf4f30031c2015-10-26 10:28:44 -04008062 rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008063 else
8064 handle_user_interrupt(rcd);
8065 return; /* OK */
8066 }
8067 /* received an interrupt, but no rcd */
8068 err_detail = "dataless";
8069 } else {
8070 /* received an interrupt, but are not using that context */
8071 err_detail = "out of range";
8072 }
8073 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008074 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008075}
8076
8077/*
8078 * RX block receive urgent interrupt. Source is < 160.
8079 */
8080static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8081{
8082 struct hfi1_ctxtdata *rcd;
8083 char *err_detail;
8084
8085 if (likely(source < dd->num_rcv_contexts)) {
8086 rcd = dd->rcd[source];
8087 if (rcd) {
8088 /* only pay attention to user urgent interrupts */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -07008089 if ((source >= dd->first_dyn_alloc_ctxt) &&
8090 (!rcd->sc || (rcd->sc->type == SC_USER)))
Mike Marciniszyn77241052015-07-30 15:17:43 -04008091 handle_user_interrupt(rcd);
8092 return; /* OK */
8093 }
8094 /* received an interrupt, but no rcd */
8095 err_detail = "dataless";
8096 } else {
8097 /* received an interrupt, but are not using that context */
8098 err_detail = "out of range";
8099 }
8100 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008101 err_detail, source);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008102}
8103
8104/*
8105 * Reserved range interrupt. Should not be called in normal operation.
8106 */
8107static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8108{
8109 char name[64];
8110
8111 dd_dev_err(dd, "unexpected %s interrupt\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008112 is_reserved_name(name, sizeof(name), source));
Mike Marciniszyn77241052015-07-30 15:17:43 -04008113}
8114
8115static const struct is_table is_table[] = {
Jubin John4d114fd2016-02-14 20:21:43 -08008116/*
8117 * start end
8118 * name func interrupt func
8119 */
Mike Marciniszyn77241052015-07-30 15:17:43 -04008120{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8121 is_misc_err_name, is_misc_err_int },
8122{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8123 is_sdma_eng_err_name, is_sdma_eng_err_int },
8124{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8125 is_sendctxt_err_name, is_sendctxt_err_int },
8126{ IS_SDMA_START, IS_SDMA_END,
8127 is_sdma_eng_name, is_sdma_eng_int },
8128{ IS_VARIOUS_START, IS_VARIOUS_END,
8129 is_various_name, is_various_int },
8130{ IS_DC_START, IS_DC_END,
8131 is_dc_name, is_dc_int },
8132{ IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8133 is_rcv_avail_name, is_rcv_avail_int },
8134{ IS_RCVURGENT_START, IS_RCVURGENT_END,
8135 is_rcv_urgent_name, is_rcv_urgent_int },
8136{ IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8137 is_send_credit_name, is_send_credit_int},
8138{ IS_RESERVED_START, IS_RESERVED_END,
8139 is_reserved_name, is_reserved_int},
8140};
8141
8142/*
8143 * Interrupt source interrupt - called when the given source has an interrupt.
8144 * Source is a bit index into an array of 64-bit integers.
8145 */
8146static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8147{
8148 const struct is_table *entry;
8149
8150 /* avoids a double compare by walking the table in-order */
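	/*
	 * Entries are listed in ascending range order, so only the upper
	 * bound needs to be checked for each entry.
	 */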
8151 for (entry = &is_table[0]; entry->is_name; entry++) {
8152 if (source < entry->end) {
8153 trace_hfi1_interrupt(dd, entry, source);
8154 entry->is_int(dd, source - entry->start);
8155 return;
8156 }
8157 }
8158 /* fell off the end */
8159 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8160}
8161
8162/*
8163 * General interrupt handler. This is able to correctly handle
8164 * all interrupts in case INTx is used.
8165 */
8166static irqreturn_t general_interrupt(int irq, void *data)
8167{
8168 struct hfi1_devdata *dd = data;
8169 u64 regs[CCE_NUM_INT_CSRS];
8170 u32 bit;
8171 int i;
8172
8173 this_cpu_inc(*dd->int_counter);
8174
8175 /* phase 1: scan and clear all handled interrupts */
8176 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8177 if (dd->gi_mask[i] == 0) {
8178 regs[i] = 0; /* used later */
8179 continue;
8180 }
8181 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8182 dd->gi_mask[i];
8183 /* only clear if anything is set */
8184 if (regs[i])
8185 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8186 }
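	/* regs[] is now a bitmap snapshot: source bit b came from CSR index b / 64, bit b % 64 */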
8187
8188 /* phase 2: call the appropriate handler */
8189 for_each_set_bit(bit, (unsigned long *)&regs[0],
Jubin John17fb4f22016-02-14 20:21:52 -08008190 CCE_NUM_INT_CSRS * 64) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008191 is_interrupt(dd, bit);
8192 }
8193
8194 return IRQ_HANDLED;
8195}
8196
8197static irqreturn_t sdma_interrupt(int irq, void *data)
8198{
8199 struct sdma_engine *sde = data;
8200 struct hfi1_devdata *dd = sde->dd;
8201 u64 status;
8202
8203#ifdef CONFIG_SDMA_VERBOSITY
8204 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8205 slashstrip(__FILE__), __LINE__, __func__);
8206 sdma_dumpstate(sde);
8207#endif
8208
8209 this_cpu_inc(*dd->int_counter);
8210
8211 /* This read_csr is really bad in the hot path */
8212 status = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008213 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8214 & sde->imask;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008215 if (likely(status)) {
8216 /* clear the interrupt(s) */
8217 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008218 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8219 status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008220
8221 /* handle the interrupt(s) */
8222 sdma_engine_interrupt(sde, status);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008223 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -04008224 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008225 sde->this_idx);
Dennis Dalessandroee495ad2017-04-09 10:17:18 -07008226 }
Mike Marciniszyn77241052015-07-30 15:17:43 -04008227 return IRQ_HANDLED;
8228}
8229
8230/*
Dean Luickecd42f82016-02-03 14:35:14 -08008231 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 8232 * to ensure that the write completed. This does NOT guarantee that
8233 * queued DMA writes to memory from the chip are pushed.
Dean Luickf4f30031c2015-10-26 10:28:44 -04008234 */
8235static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8236{
8237 struct hfi1_devdata *dd = rcd->dd;
8238 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8239
8240 mmiowb(); /* make sure everything before is written */
8241 write_csr(dd, addr, rcd->imask);
8242 /* force the above write on the chip and get a value back */
8243 (void)read_csr(dd, addr);
8244}
8245
8246/* force the receive interrupt */
Jim Snowfb9036d2016-01-11 18:32:21 -05008247void force_recv_intr(struct hfi1_ctxtdata *rcd)
Dean Luickf4f30031c2015-10-26 10:28:44 -04008248{
8249 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8250}
8251
Dean Luickecd42f82016-02-03 14:35:14 -08008252/*
8253 * Return non-zero if a packet is present.
8254 *
8255 * This routine is called when rechecking for packets after the RcvAvail
8256 * interrupt has been cleared down. First, do a quick check of memory for
8257 * a packet present. If not found, use an expensive CSR read of the context
8258 * tail to determine the actual tail. The CSR read is necessary because there
8259 * is no method to push pending DMAs to memory other than an interrupt and we
8260 * are trying to determine if we need to force an interrupt.
8261 */
Dean Luickf4f30031c2015-10-26 10:28:44 -04008262static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8263{
Dean Luickecd42f82016-02-03 14:35:14 -08008264 u32 tail;
8265 int present;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008266
Dean Luickecd42f82016-02-03 14:35:14 -08008267 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8268 present = (rcd->seq_cnt ==
8269 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8270 else /* is RDMA rtail */
8271 present = (rcd->head != get_rcvhdrtail(rcd));
8272
8273 if (present)
8274 return 1;
8275
 8276	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8277 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8278 return rcd->head != tail;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008279}
8280
8281/*
8282 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8283 * This routine will try to handle packets immediately (latency), but if
8284 * it finds too many, it will invoke the thread handler (bandwitdh). The
Jubin John16733b82016-02-14 20:20:58 -08008285 * chip receive interrupt is *not* cleared down until this or the thread (if
Dean Luickf4f30031c2015-10-26 10:28:44 -04008286 * invoked) is finished. The intent is to avoid extra interrupts while we
8287 * are processing packets anyway.
Mike Marciniszyn77241052015-07-30 15:17:43 -04008288 */
8289static irqreturn_t receive_context_interrupt(int irq, void *data)
8290{
8291 struct hfi1_ctxtdata *rcd = data;
8292 struct hfi1_devdata *dd = rcd->dd;
Dean Luickf4f30031c2015-10-26 10:28:44 -04008293 int disposition;
8294 int present;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008295
8296 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8297 this_cpu_inc(*dd->int_counter);
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -08008298 aspm_ctx_disable(rcd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008299
Dean Luickf4f30031c2015-10-26 10:28:44 -04008300 /* receive interrupt remains blocked while processing packets */
8301 disposition = rcd->do_interrupt(rcd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008302
Dean Luickf4f30031c2015-10-26 10:28:44 -04008303 /*
8304 * Too many packets were seen while processing packets in this
8305 * IRQ handler. Invoke the handler thread. The receive interrupt
8306 * remains blocked.
8307 */
8308 if (disposition == RCV_PKT_LIMIT)
8309 return IRQ_WAKE_THREAD;
8310
8311 /*
8312 * The packet processor detected no more packets. Clear the receive
 8313	 * interrupt and recheck for a packet that may have arrived
8314 * after the previous check and interrupt clear. If a packet arrived,
8315 * force another interrupt.
8316 */
8317 clear_recv_intr(rcd);
8318 present = check_packet_present(rcd);
8319 if (present)
8320 force_recv_intr(rcd);
8321
8322 return IRQ_HANDLED;
8323}
8324
8325/*
8326 * Receive packet thread handler. This expects to be invoked with the
8327 * receive interrupt still blocked.
8328 */
8329static irqreturn_t receive_context_thread(int irq, void *data)
8330{
8331 struct hfi1_ctxtdata *rcd = data;
8332 int present;
8333
8334 /* receive interrupt is still blocked from the IRQ handler */
8335 (void)rcd->do_interrupt(rcd, 1);
8336
8337 /*
8338 * The packet processor will only return if it detected no more
8339 * packets. Hold IRQs here so we can safely clear the interrupt and
8340 * recheck for a packet that may have arrived after the previous
8341 * check and the interrupt clear. If a packet arrived, force another
8342 * interrupt.
8343 */
8344 local_irq_disable();
8345 clear_recv_intr(rcd);
8346 present = check_packet_present(rcd);
8347 if (present)
8348 force_recv_intr(rcd);
8349 local_irq_enable();
Mike Marciniszyn77241052015-07-30 15:17:43 -04008350
8351 return IRQ_HANDLED;
8352}
8353
8354/* ========================================================================= */
8355
8356u32 read_physical_state(struct hfi1_devdata *dd)
8357{
8358 u64 reg;
8359
8360 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8361 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8362 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8363}
8364
Jim Snowfb9036d2016-01-11 18:32:21 -05008365u32 read_logical_state(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008366{
8367 u64 reg;
8368
8369 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8370 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8371 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8372}
8373
8374static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8375{
8376 u64 reg;
8377
8378 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8379 /* clear current state, set new state */
8380 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8381 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8382 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8383}
8384
8385/*
8386 * Use the 8051 to read a LCB CSR.
8387 */
8388static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8389{
8390 u32 regno;
8391 int ret;
8392
8393 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8394 if (acquire_lcb_access(dd, 0) == 0) {
8395 *data = read_csr(dd, addr);
8396 release_lcb_access(dd, 0);
8397 return 0;
8398 }
8399 return -EBUSY;
8400 }
8401
8402 /* register is an index of LCB registers: (offset - base) / 8 */
8403 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8404 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8405 if (ret != HCMD_SUCCESS)
8406 return -EBUSY;
8407 return 0;
8408}
8409
8410/*
Michael J. Ruhl86884262017-03-20 17:24:51 -07008411 * Provide a cache for some of the LCB registers in case the LCB is
8412 * unavailable.
8413 * (The LCB is unavailable in certain link states, for example.)
8414 */
8415struct lcb_datum {
8416 u32 off;
8417 u64 val;
8418};
8419
8420static struct lcb_datum lcb_cache[] = {
8421 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8422 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8423 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8424};
8425
8426static void update_lcb_cache(struct hfi1_devdata *dd)
8427{
8428 int i;
8429 int ret;
8430 u64 val;
8431
8432 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8433 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8434
8435 /* Update if we get good data */
8436 if (likely(ret != -EBUSY))
8437 lcb_cache[i].val = val;
8438 }
8439}
8440
8441static int read_lcb_cache(u32 off, u64 *val)
8442{
8443 int i;
8444
8445 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8446 if (lcb_cache[i].off == off) {
8447 *val = lcb_cache[i].val;
8448 return 0;
8449 }
8450 }
8451
8452 pr_warn("%s bad offset 0x%x\n", __func__, off);
8453 return -1;
8454}
8455
8456/*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008457 * Read an LCB CSR. Access may not be in host control, so check.
8458 * Return 0 on success, -EBUSY on failure.
8459 */
8460int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8461{
8462 struct hfi1_pportdata *ppd = dd->pport;
8463
8464 /* if up, go through the 8051 for the value */
8465 if (ppd->host_link_state & HLS_UP)
8466 return read_lcb_via_8051(dd, addr, data);
Michael J. Ruhl86884262017-03-20 17:24:51 -07008467 /* if going up or down, check the cache, otherwise, no access */
8468 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8469 if (read_lcb_cache(addr, data))
8470 return -EBUSY;
8471 return 0;
8472 }
8473
Mike Marciniszyn77241052015-07-30 15:17:43 -04008474 /* otherwise, host has access */
8475 *data = read_csr(dd, addr);
8476 return 0;
8477}
8478
8479/*
8480 * Use the 8051 to write a LCB CSR.
8481 */
8482static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8483{
Dean Luick3bf40d62015-11-06 20:07:04 -05008484 u32 regno;
8485 int ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008486
Dean Luick3bf40d62015-11-06 20:07:04 -05008487 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008488 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
Dean Luick3bf40d62015-11-06 20:07:04 -05008489 if (acquire_lcb_access(dd, 0) == 0) {
8490 write_csr(dd, addr, data);
8491 release_lcb_access(dd, 0);
8492 return 0;
8493 }
8494 return -EBUSY;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008495 }
Dean Luick3bf40d62015-11-06 20:07:04 -05008496
8497 /* register is an index of LCB registers: (offset - base) / 8 */
8498 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8499 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8500 if (ret != HCMD_SUCCESS)
8501 return -EBUSY;
8502 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008503}
8504
8505/*
8506 * Write an LCB CSR. Access may not be in host control, so check.
8507 * Return 0 on success, -EBUSY on failure.
8508 */
8509int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8510{
8511 struct hfi1_pportdata *ppd = dd->pport;
8512
8513 /* if up, go through the 8051 for the value */
8514 if (ppd->host_link_state & HLS_UP)
8515 return write_lcb_via_8051(dd, addr, data);
8516 /* if going up or down, no access */
8517 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8518 return -EBUSY;
8519 /* otherwise, host has access */
8520 write_csr(dd, addr, data);
8521 return 0;
8522}
8523
8524/*
8525 * Returns:
8526 * < 0 = Linux error, not able to get access
8527 * > 0 = 8051 command RETURN_CODE
8528 */
8529static int do_8051_command(
8530 struct hfi1_devdata *dd,
8531 u32 type,
8532 u64 in_data,
8533 u64 *out_data)
8534{
8535 u64 reg, completed;
8536 int return_code;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008537 unsigned long timeout;
8538
8539 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8540
Tadeusz Struk22546b72017-04-28 10:40:02 -07008541 mutex_lock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008542
8543 /* We can't send any commands to the 8051 if it's in reset */
8544 if (dd->dc_shutdown) {
8545 return_code = -ENODEV;
8546 goto fail;
8547 }
8548
8549 /*
8550 * If an 8051 host command timed out previously, then the 8051 is
8551 * stuck.
8552 *
8553 * On first timeout, attempt to reset and restart the entire DC
8554 * block (including 8051). (Is this too big of a hammer?)
8555 *
8556 * If the 8051 times out a second time, the reset did not bring it
8557 * back to healthy life. In that case, fail any subsequent commands.
8558 */
8559 if (dd->dc8051_timed_out) {
8560 if (dd->dc8051_timed_out > 1) {
8561 dd_dev_err(dd,
8562 "Previous 8051 host command timed out, skipping command %u\n",
8563 type);
8564 return_code = -ENXIO;
8565 goto fail;
8566 }
Tadeusz Struk22546b72017-04-28 10:40:02 -07008567 _dc_shutdown(dd);
8568 _dc_start(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008569 }
8570
8571 /*
8572 * If there is no timeout, then the 8051 command interface is
8573 * waiting for a command.
8574 */
8575
8576 /*
Dean Luick3bf40d62015-11-06 20:07:04 -05008577	 * When writing an LCB CSR, out_data contains the full value
 8578	 * to be written, while in_data contains the relative LCB
 8579	 * address in 7:0. Do the work here, rather than the caller,
 8580	 * of distributing the write data to where it needs to go:
8581 *
8582 * Write data
8583 * 39:00 -> in_data[47:8]
8584 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8585 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8586 */
8587 if (type == HCMD_WRITE_LCB_CSR) {
8588 in_data |= ((*out_data) & 0xffffffffffull) << 8;
Dean Luick00801672016-12-07 19:33:40 -08008589 /* must preserve COMPLETED - it is tied to hardware */
8590 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8591 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8592 reg |= ((((*out_data) >> 40) & 0xff) <<
Dean Luick3bf40d62015-11-06 20:07:04 -05008593 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8594 | ((((*out_data) >> 48) & 0xffff) <<
8595 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8596 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8597 }
8598
8599 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -04008600 * Do two writes: the first to stabilize the type and req_data, the
8601 * second to activate.
8602 */
8603 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8604 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8605 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8606 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8607 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8608 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8609 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8610
8611 /* wait for completion, alternate: interrupt */
8612 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8613 while (1) {
8614 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8615 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8616 if (completed)
8617 break;
8618 if (time_after(jiffies, timeout)) {
8619 dd->dc8051_timed_out++;
8620 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8621 if (out_data)
8622 *out_data = 0;
8623 return_code = -ETIMEDOUT;
8624 goto fail;
8625 }
8626 udelay(2);
8627 }
8628
8629 if (out_data) {
8630 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8631 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8632 if (type == HCMD_READ_LCB_CSR) {
8633 /* top 16 bits are in a different register */
8634 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8635 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8636 << (48
8637 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8638 }
8639 }
8640 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8641 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8642 dd->dc8051_timed_out = 0;
8643 /*
8644 * Clear command for next user.
8645 */
8646 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8647
8648fail:
Tadeusz Struk22546b72017-04-28 10:40:02 -07008649 mutex_unlock(&dd->dc8051_lock);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008650 return return_code;
8651}
8652
8653static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8654{
8655 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8656}
8657
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008658int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8659 u8 lane_id, u32 config_data)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008660{
8661 u64 data;
8662 int ret;
8663
8664 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8665 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8666 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8667 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8668 if (ret != HCMD_SUCCESS) {
8669 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08008670 "load 8051 config: field id %d, lane %d, err %d\n",
8671 (int)field_id, (int)lane_id, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008672 }
8673 return ret;
8674}
8675
8676/*
8677 * Read the 8051 firmware "registers". Use the RAM directly. Always
8678 * set the result, even on error.
8679 * Return 0 on success, -errno on failure
8680 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08008681int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8682 u32 *result)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008683{
8684 u64 big_data;
8685 u32 addr;
8686 int ret;
8687
8688 /* address start depends on the lane_id */
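	/*
	 * The 8051 config RAM places the NUM_GENERAL_FIELDS general fields
	 * (selected with lane_id >= 4, i.e. GENERAL_CONFIG) at address 0,
	 * followed by NUM_LANE_FIELDS fields for each of lanes 0-3.  Each
	 * field is 4 bytes wide.
	 */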
8689 if (lane_id < 4)
8690 addr = (4 * NUM_GENERAL_FIELDS)
8691 + (lane_id * 4 * NUM_LANE_FIELDS);
8692 else
8693 addr = 0;
8694 addr += field_id * 4;
8695
8696 /* read is in 8-byte chunks, hardware will truncate the address down */
8697 ret = read_8051_data(dd, addr, 8, &big_data);
8698
8699 if (ret == 0) {
8700 /* extract the 4 bytes we want */
8701 if (addr & 0x4)
8702 *result = (u32)(big_data >> 32);
8703 else
8704 *result = (u32)big_data;
8705 } else {
8706 *result = 0;
8707 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008708 __func__, lane_id, field_id);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008709 }
8710
8711 return ret;
8712}
8713
8714static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8715 u8 continuous)
8716{
8717 u32 frame;
8718
8719 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8720 | power_management << POWER_MANAGEMENT_SHIFT;
8721 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8722 GENERAL_CONFIG, frame);
8723}
8724
8725static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8726 u16 vl15buf, u8 crc_sizes)
8727{
8728 u32 frame;
8729
8730 frame = (u32)vau << VAU_SHIFT
8731 | (u32)z << Z_SHIFT
8732 | (u32)vcu << VCU_SHIFT
8733 | (u32)vl15buf << VL15BUF_SHIFT
8734 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8735 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8736 GENERAL_CONFIG, frame);
8737}
8738
8739static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8740 u8 *flag_bits, u16 *link_widths)
8741{
8742 u32 frame;
8743
8744 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008745 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008746 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8747 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8748 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8749}
8750
8751static int write_vc_local_link_width(struct hfi1_devdata *dd,
8752 u8 misc_bits,
8753 u8 flag_bits,
8754 u16 link_widths)
8755{
8756 u32 frame;
8757
8758 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8759 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8760 | (u32)link_widths << LINK_WIDTH_SHIFT;
8761 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8762 frame);
8763}
8764
8765static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8766 u8 device_rev)
8767{
8768 u32 frame;
8769
8770 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8771 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8772 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8773}
8774
8775static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8776 u8 *device_rev)
8777{
8778 u32 frame;
8779
8780 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8781 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8782 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8783 & REMOTE_DEVICE_REV_MASK;
8784}
8785
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008786void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8787 u8 *ver_patch)
Mike Marciniszyn77241052015-07-30 15:17:43 -04008788{
8789 u32 frame;
8790
8791 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07008792 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8793 STS_FM_VERSION_MAJOR_MASK;
8794 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8795 STS_FM_VERSION_MINOR_MASK;
8796
8797 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8798 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8799 STS_FM_VERSION_PATCH_MASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -04008800}
8801
8802static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8803 u8 *continuous)
8804{
8805 u32 frame;
8806
8807 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8808 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8809 & POWER_MANAGEMENT_MASK;
8810 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8811 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8812}
8813
8814static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8815 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8816{
8817 u32 frame;
8818
8819 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8820 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8821 *z = (frame >> Z_SHIFT) & Z_MASK;
8822 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8823 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8824 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8825}
8826
8827static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8828 u8 *remote_tx_rate,
8829 u16 *link_widths)
8830{
8831 u32 frame;
8832
8833 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008834 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008835 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8836 & REMOTE_TX_RATE_MASK;
8837 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8838}
8839
8840static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8841{
8842 u32 frame;
8843
8844 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8845 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8846}
8847
8848static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8849{
8850 u32 frame;
8851
8852 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8853 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8854}
8855
8856static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8857{
8858 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8859}
8860
8861static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8862{
8863 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8864}
8865
8866void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8867{
8868 u32 frame;
8869 int ret;
8870
8871 *link_quality = 0;
8872 if (dd->pport->host_link_state & HLS_UP) {
8873 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
Jubin John17fb4f22016-02-14 20:21:52 -08008874 &frame);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008875 if (ret == 0)
8876 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8877 & LINK_QUALITY_MASK;
8878 }
8879}
8880
8881static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8882{
8883 u32 frame;
8884
8885 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8886 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8887}
8888
Dean Luickfeb831d2016-04-14 08:31:36 -07008889static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8890{
8891 u32 frame;
8892
8893 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8894 *ldr = (frame & 0xff);
8895}
8896
Mike Marciniszyn77241052015-07-30 15:17:43 -04008897static int read_tx_settings(struct hfi1_devdata *dd,
8898 u8 *enable_lane_tx,
8899 u8 *tx_polarity_inversion,
8900 u8 *rx_polarity_inversion,
8901 u8 *max_rate)
8902{
8903 u32 frame;
8904 int ret;
8905
8906 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8907 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8908 & ENABLE_LANE_TX_MASK;
8909 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8910 & TX_POLARITY_INVERSION_MASK;
8911 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8912 & RX_POLARITY_INVERSION_MASK;
8913 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8914 return ret;
8915}
8916
8917static int write_tx_settings(struct hfi1_devdata *dd,
8918 u8 enable_lane_tx,
8919 u8 tx_polarity_inversion,
8920 u8 rx_polarity_inversion,
8921 u8 max_rate)
8922{
8923 u32 frame;
8924
8925 /* no need to mask, all variable sizes match field widths */
8926 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8927 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8928 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8929 | max_rate << MAX_RATE_SHIFT;
8930 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8931}
8932
Mike Marciniszyn77241052015-07-30 15:17:43 -04008933/*
8934 * Read an idle LCB message.
8935 *
8936 * Returns 0 on success, -EINVAL on error
8937 */
8938static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8939{
8940 int ret;
8941
Jubin John17fb4f22016-02-14 20:21:52 -08008942 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008943 if (ret != HCMD_SUCCESS) {
8944 dd_dev_err(dd, "read idle message: type %d, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008945 (u32)type, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008946 return -EINVAL;
8947 }
8948 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8949 /* return only the payload as we already know the type */
8950 *data_out >>= IDLE_PAYLOAD_SHIFT;
8951 return 0;
8952}
8953
8954/*
8955 * Read an idle SMA message. To be done in response to a notification from
8956 * the 8051.
8957 *
8958 * Returns 0 on success, -EINVAL on error
8959 */
8960static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8961{
Jubin John17fb4f22016-02-14 20:21:52 -08008962 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8963 data);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008964}
8965
8966/*
8967 * Send an idle LCB message.
8968 *
8969 * Returns 0 on success, -EINVAL on error
8970 */
8971static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8972{
8973 int ret;
8974
8975 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8976 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8977 if (ret != HCMD_SUCCESS) {
8978 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -08008979 data, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008980 return -EINVAL;
8981 }
8982 return 0;
8983}
8984
8985/*
8986 * Send an idle SMA message.
8987 *
8988 * Returns 0 on success, -EINVAL on error
8989 */
8990int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8991{
8992 u64 data;
8993
Jubin John17fb4f22016-02-14 20:21:52 -08008994 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8995 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04008996 return send_idle_message(dd, data);
8997}
8998
8999/*
9000 * Initialize the LCB then do a quick link up. This may or may not be
9001 * in loopback.
9002 *
9003 * return 0 on success, -errno on error
9004 */
9005static int do_quick_linkup(struct hfi1_devdata *dd)
9006{
Mike Marciniszyn77241052015-07-30 15:17:43 -04009007 int ret;
9008
9009 lcb_shutdown(dd, 0);
9010
9011 if (loopback) {
9012 /* LCB_CFG_LOOPBACK.VAL = 2 */
9013 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9014 write_csr(dd, DC_LCB_CFG_LOOPBACK,
Jubin John17fb4f22016-02-14 20:21:52 -08009015 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009016 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9017 }
9018
9019 /* start the LCBs */
9020 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9021 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9022
9023 /* simulator only loopback steps */
9024 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9025 /* LCB_CFG_RUN.EN = 1 */
9026 write_csr(dd, DC_LCB_CFG_RUN,
Jubin John17fb4f22016-02-14 20:21:52 -08009027 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009028
Dean Luickec8a1422017-03-20 17:24:39 -07009029 ret = wait_link_transfer_active(dd, 10);
9030 if (ret)
9031 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009032
9033 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
Jubin John17fb4f22016-02-14 20:21:52 -08009034 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009035 }
9036
9037 if (!loopback) {
9038 /*
9039 * When doing quick linkup and not in loopback, both
9040 * sides must be done with LCB set-up before either
9041 * starts the quick linkup. Put a delay here so that
9042 * both sides can be started and have a chance to be
9043 * done with LCB set up before resuming.
9044 */
9045 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009046 "Pausing for peer to be finished with LCB set up\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009047 msleep(5000);
Jubin John17fb4f22016-02-14 20:21:52 -08009048 dd_dev_err(dd, "Continuing with quick linkup\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009049 }
9050
9051 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9052 set_8051_lcb_access(dd);
9053
9054 /*
9055 * State "quick" LinkUp request sets the physical link state to
9056 * LinkUp without a verify capability sequence.
9057 * This state is in simulator v37 and later.
9058 */
9059 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9060 if (ret != HCMD_SUCCESS) {
9061 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009062 "%s: set physical link state to quick LinkUp failed with return %d\n",
9063 __func__, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009064
9065 set_host_lcb_access(dd);
9066 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9067
9068 if (ret >= 0)
9069 ret = -EINVAL;
9070 return ret;
9071 }
9072
9073 return 0; /* success */
9074}
9075
9076/*
9077 * Set the SerDes to internal loopback mode.
9078 * Returns 0 on success, -errno on error.
9079 */
9080static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9081{
9082 int ret;
9083
9084 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9085 if (ret == HCMD_SUCCESS)
9086 return 0;
9087 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009088 "Set physical link state to SerDes Loopback failed with return %d\n",
9089 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009090 if (ret >= 0)
9091 ret = -EINVAL;
9092 return ret;
9093}
9094
9095/*
9096 * Do all special steps to set up loopback.
9097 */
9098static int init_loopback(struct hfi1_devdata *dd)
9099{
9100 dd_dev_info(dd, "Entering loopback mode\n");
9101
9102 /* all loopbacks should disable self GUID check */
9103 write_csr(dd, DC_DC8051_CFG_MODE,
Jubin John17fb4f22016-02-14 20:21:52 -08009104 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009105
9106 /*
9107 * The simulator has only one loopback option - LCB. Switch
9108 * to that option, which includes quick link up.
9109 *
9110 * Accept all valid loopback values.
9111 */
Jubin Johnd0d236e2016-02-14 20:20:15 -08009112 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9113 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9114 loopback == LOOPBACK_CABLE)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009115 loopback = LOOPBACK_LCB;
9116 quick_linkup = 1;
9117 return 0;
9118 }
9119
9120 /* handle serdes loopback */
9121 if (loopback == LOOPBACK_SERDES) {
9122		/* internal serdes loopback needs quick linkup on RTL */
9123 if (dd->icode == ICODE_RTL_SILICON)
9124 quick_linkup = 1;
9125 return set_serdes_loopback_mode(dd);
9126 }
9127
9128 /* LCB loopback - handled at poll time */
9129 if (loopback == LOOPBACK_LCB) {
9130 quick_linkup = 1; /* LCB is always quick linkup */
9131
9132 /* not supported in emulation due to emulation RTL changes */
9133 if (dd->icode == ICODE_FPGA_EMULATION) {
9134 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009135 "LCB loopback not supported in emulation\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -04009136 return -EINVAL;
9137 }
9138 return 0;
9139 }
9140
9141 /* external cable loopback requires no extra steps */
9142 if (loopback == LOOPBACK_CABLE)
9143 return 0;
9144
9145 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9146 return -EINVAL;
9147}
9148
9149/*
9150 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9151 * used in the Verify Capability link width attribute.
9152 */
9153static u16 opa_to_vc_link_widths(u16 opa_widths)
9154{
9155 int i;
9156 u16 result = 0;
9157
9158 static const struct link_bits {
9159 u16 from;
9160 u16 to;
9161 } opa_link_xlate[] = {
Jubin John8638b772016-02-14 20:19:24 -08009162 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9163 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9164 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9165 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
Mike Marciniszyn77241052015-07-30 15:17:43 -04009166 };
9167
9168 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9169 if (opa_widths & opa_link_xlate[i].from)
9170 result |= opa_link_xlate[i].to;
9171 }
9172 return result;
9173}
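/*
 * Worked example (illustrative only): if the FM enables 1X and 4X, i.e.
 * opa_widths = OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X, the translation
 * loop above yields result = (1 << 0) | (1 << 3) = 0x9, the encoding
 * used by the Verify Capability link width attribute.
 */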
9174
9175/*
9176 * Set link attributes before moving to polling.
9177 */
9178static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9179{
9180 struct hfi1_devdata *dd = ppd->dd;
9181 u8 enable_lane_tx;
9182 u8 tx_polarity_inversion;
9183 u8 rx_polarity_inversion;
9184 int ret;
9185
9186 /* reset our fabric serdes to clear any lingering problems */
9187 fabric_serdes_reset(dd);
9188
9189 /* set the local tx rate - need to read-modify-write */
9190 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009191 &rx_polarity_inversion, &ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009192 if (ret)
9193 goto set_local_link_attributes_fail;
9194
Michael J. Ruhl5e6e94242017-03-20 17:25:48 -07009195 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009196 /* set the tx rate to the fastest enabled */
9197 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9198 ppd->local_tx_rate = 1;
9199 else
9200 ppd->local_tx_rate = 0;
9201 } else {
9202 /* set the tx rate to all enabled */
9203 ppd->local_tx_rate = 0;
9204 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9205 ppd->local_tx_rate |= 2;
9206 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9207 ppd->local_tx_rate |= 1;
9208 }
Easwar Hariharanfebffe22015-10-26 10:28:36 -04009209
9210 enable_lane_tx = 0xF; /* enable all four lanes */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009211 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
Jubin John17fb4f22016-02-14 20:21:52 -08009212 rx_polarity_inversion, ppd->local_tx_rate);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009213 if (ret != HCMD_SUCCESS)
9214 goto set_local_link_attributes_fail;
9215
9216 /*
9217 * DC supports continuous updates.
9218 */
Jubin John17fb4f22016-02-14 20:21:52 -08009219 ret = write_vc_local_phy(dd,
9220 0 /* no power management */,
9221 1 /* continuous updates */);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009222 if (ret != HCMD_SUCCESS)
9223 goto set_local_link_attributes_fail;
9224
9225 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9226 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9227 ppd->port_crc_mode_enabled);
9228 if (ret != HCMD_SUCCESS)
9229 goto set_local_link_attributes_fail;
9230
9231 ret = write_vc_local_link_width(dd, 0, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009232 opa_to_vc_link_widths(
9233 ppd->link_width_enabled));
Mike Marciniszyn77241052015-07-30 15:17:43 -04009234 if (ret != HCMD_SUCCESS)
9235 goto set_local_link_attributes_fail;
9236
9237 /* let peer know who we are */
9238 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9239 if (ret == HCMD_SUCCESS)
9240 return 0;
9241
9242set_local_link_attributes_fail:
9243 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009244 "Failed to set local link attributes, return 0x%x\n",
9245 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009246 return ret;
9247}
9248
9249/*
Easwar Hariharan623bba22016-04-12 11:25:57 -07009250 * Call this to start the link.
9251 * Do not do anything if the link is disabled.
9252 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009253 */
9254int start_link(struct hfi1_pportdata *ppd)
9255{
Dean Luick0db9dec2016-09-06 04:35:20 -07009256 /*
9257 * Tune the SerDes to a ballpark setting for optimal signal and bit
9258 * error rate. Needs to be done before starting the link.
9259 */
9260 tune_serdes(ppd);
9261
Mike Marciniszyn77241052015-07-30 15:17:43 -04009262 if (!ppd->link_enabled) {
9263 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009264 "%s: stopping link start because link is disabled\n",
9265 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009266 return 0;
9267 }
9268 if (!ppd->driver_link_ready) {
9269 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009270 "%s: stopping link start because driver is not ready\n",
9271 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009272 return 0;
9273 }
9274
Sebastian Sanchez3ec5fa22016-06-09 07:51:57 -07009275 /*
9276 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9277 * pkey table can be configured properly if the HFI unit is connected
9278	 * to a switch port with MgmtAllowed=NO
9279 */
9280 clear_full_mgmt_pkey(ppd);
9281
Easwar Hariharan623bba22016-04-12 11:25:57 -07009282 return set_link_state(ppd, HLS_DN_POLL);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009283}
9284
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009285static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9286{
9287 struct hfi1_devdata *dd = ppd->dd;
9288 u64 mask;
9289 unsigned long timeout;
9290
9291 /*
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009292 * Some QSFP cables have a quirk that asserts the IntN line as a side
9293 * effect of power up on plug-in. We ignore this false positive
9294 * interrupt until the module has finished powering up by waiting for
9295 * a minimum timeout of the module inrush initialization time of
9296 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9297 * module have stabilized.
9298 */
9299 msleep(500);
9300
9301 /*
9302 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009303 */
9304 timeout = jiffies + msecs_to_jiffies(2000);
9305 while (1) {
9306 mask = read_csr(dd, dd->hfi1_id ?
9307 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009308 if (!(mask & QSFP_HFI0_INT_N))
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009309 break;
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009310 if (time_after(jiffies, timeout)) {
9311 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9312 __func__);
9313 break;
9314 }
9315 udelay(2);
9316 }
9317}
9318
9319static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9320{
9321 struct hfi1_devdata *dd = ppd->dd;
9322 u64 mask;
9323
9324 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009325 if (enable) {
9326 /*
9327 * Clear the status register to avoid an immediate interrupt
9328 * when we re-enable the IntN pin
9329 */
9330 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9331 QSFP_HFI0_INT_N);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009332 mask |= (u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009333 } else {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009334 mask &= ~(u64)QSFP_HFI0_INT_N;
Easwar Hariharan5fbd98d2016-07-25 13:39:57 -07009335 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009336 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9337}
9338
9339void reset_qsfp(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009340{
9341 struct hfi1_devdata *dd = ppd->dd;
9342 u64 mask, qsfp_mask;
9343
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009344 /* Disable INT_N from triggering QSFP interrupts */
9345 set_qsfp_int_n(ppd, 0);
9346
9347 /* Reset the QSFP */
Mike Marciniszyn77241052015-07-30 15:17:43 -04009348 mask = (u64)QSFP_HFI0_RESET_N;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009349
9350 qsfp_mask = read_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009351 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009352 qsfp_mask &= ~mask;
9353 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009354 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009355
9356 udelay(10);
9357
9358 qsfp_mask |= mask;
9359 write_csr(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009360 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009361
9362 wait_for_qsfp_init(ppd);
9363
9364 /*
9365 * Allow INT_N to trigger the QSFP interrupt to watch
9366 * for alarms and warnings
9367 */
9368 set_qsfp_int_n(ppd, 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009369}
9370
9371static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9372 u8 *qsfp_interrupt_status)
9373{
9374 struct hfi1_devdata *dd = ppd->dd;
9375
9376 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009377 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
Neel Desai03e80e92017-04-09 10:16:47 -07009378 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
Jubin John17fb4f22016-02-14 20:21:52 -08009379 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009380
9381 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009382 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9383 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9384 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009385
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009386 /*
9387 * The remaining alarms/warnings don't matter if the link is down.
9388 */
9389 if (ppd->host_link_state & HLS_DOWN)
9390 return 0;
9391
Mike Marciniszyn77241052015-07-30 15:17:43 -04009392 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009393 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9394 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9395 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009396
9397 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009398 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9399 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9400 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009401
9402 /* Byte 2 is vendor specific */
9403
9404 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009405 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9406 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9407 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009408
9409 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009410 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9411 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9412 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009413
9414 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009415 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9416 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9417 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009418
9419 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009420 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9421 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9422 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009423
9424 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009425 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9426 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9427 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009428
9429 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009430 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9431 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9432 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009433
9434 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009435 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9436 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9437 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009438
9439 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009440 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9441 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9442 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009443
9444 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009445 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9446 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9447 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009448
9449 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009450 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9451 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9452 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009453
9454 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009455 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9456 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9457 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009458
9459 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
Jubin John17fb4f22016-02-14 20:21:52 -08009460 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9461 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9462 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009463
9464 /* Bytes 9-10 and 11-12 are reserved */
9465 /* Bytes 13-15 are vendor specific */
9466
9467 return 0;
9468}
9469
Easwar Hariharan623bba22016-04-12 11:25:57 -07009470/* This routine will only be scheduled if the QSFP module present is asserted */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009471void qsfp_event(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009472{
9473 struct qsfp_data *qd;
9474 struct hfi1_pportdata *ppd;
9475 struct hfi1_devdata *dd;
9476
9477 qd = container_of(work, struct qsfp_data, qsfp_work);
9478 ppd = qd->ppd;
9479 dd = ppd->dd;
9480
9481 /* Sanity check */
9482 if (!qsfp_mod_present(ppd))
9483 return;
9484
9485 /*
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -07009486 * Turn DC back on after cable has been re-inserted. Up until
9487 * now, the DC has been in reset to save power.
Mike Marciniszyn77241052015-07-30 15:17:43 -04009488 */
9489 dc_start(dd);
9490
9491 if (qd->cache_refresh_required) {
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009492 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009493
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009494 wait_for_qsfp_init(ppd);
9495
9496 /*
9497 * Allow INT_N to trigger the QSFP interrupt to watch
9498 * for alarms and warnings
Mike Marciniszyn77241052015-07-30 15:17:43 -04009499 */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009500 set_qsfp_int_n(ppd, 1);
9501
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009502 start_link(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009503 }
9504
9505 if (qd->check_interrupt_flags) {
9506 u8 qsfp_interrupt_status[16] = {0,};
9507
Dean Luick765a6fa2016-03-05 08:50:06 -08009508 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9509 &qsfp_interrupt_status[0], 16) != 16) {
Mike Marciniszyn77241052015-07-30 15:17:43 -04009510 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009511 "%s: Failed to read status of QSFP module\n",
9512 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009513 } else {
9514 unsigned long flags;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009515
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009516 handle_qsfp_error_conditions(
9517 ppd, qsfp_interrupt_status);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009518 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9519 ppd->qsfp_info.check_interrupt_flags = 0;
9520 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
Jubin John17fb4f22016-02-14 20:21:52 -08009521 flags);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009522 }
9523 }
9524}
9525
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009526static void init_qsfp_int(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009527{
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009528 struct hfi1_pportdata *ppd = dd->pport;
9529 u64 qsfp_mask, cce_int_mask;
9530 const int qsfp1_int_smask = QSFP1_INT % 64;
9531 const int qsfp2_int_smask = QSFP2_INT % 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009532
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009533 /*
9534	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0.
9535 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9536 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9537 * the index of the appropriate CSR in the CCEIntMask CSR array
9538 */
9539 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9540 (8 * (QSFP1_INT / 64)));
9541 if (dd->hfi1_id) {
9542 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9543 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9544 cce_int_mask);
9545 } else {
9546 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9547 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9548 cce_int_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009549 }
9550
Mike Marciniszyn77241052015-07-30 15:17:43 -04009551 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9552 /* Clear current status to avoid spurious interrupts */
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009553 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9554 qsfp_mask);
9555 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9556 qsfp_mask);
9557
9558 set_qsfp_int_n(ppd, 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009559
9560 /* Handle active low nature of INT_N and MODPRST_N pins */
9561 if (qsfp_mod_present(ppd))
9562 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9563 write_csr(dd,
9564 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9565 qsfp_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009566}
9567
Dean Luickbbdeb332015-12-01 15:38:15 -05009568/*
9569 * Do a one-time initialize of the LCB block.
9570 */
9571static void init_lcb(struct hfi1_devdata *dd)
9572{
Dean Luicka59329d2016-02-03 14:32:31 -08009573 /* simulator does not correctly handle LCB cclk loopback, skip */
9574 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9575 return;
9576
Dean Luickbbdeb332015-12-01 15:38:15 -05009577 /* the DC has been reset earlier in the driver load */
9578
9579 /* set LCB for cclk loopback on the port */
9580 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9581 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9582 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9583 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9584 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9585 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9586 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9587}
9588
Dean Luick673b9752016-08-31 07:24:33 -07009589/*
9590 * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9591 * on error.
9592 */
9593static int test_qsfp_read(struct hfi1_pportdata *ppd)
9594{
9595 int ret;
9596 u8 status;
9597
Easwar Hariharanfb897ad2017-03-20 17:25:42 -07009598 /*
9599	 * Report success if this is not a QSFP port or, if it is a QSFP port,
9600	 * the cable is not present
9601 */
9602 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
Dean Luick673b9752016-08-31 07:24:33 -07009603 return 0;
9604
9605 /* read byte 2, the status byte */
9606 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9607 if (ret < 0)
9608 return ret;
9609 if (ret != 1)
9610 return -EIO;
9611
9612 return 0; /* success */
9613}
9614
9615/*
9616 * Values for QSFP retry.
9617 *
9618 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9619 * arrived at from experience on a large cluster.
9620 */
9621#define MAX_QSFP_RETRIES 20
9622#define QSFP_RETRY_WAIT 500 /* msec */
9623
9624/*
9625 * Try a QSFP read. If it fails, schedule a retry for later.
9626 * Called on first link activation after driver load.
9627 */
9628static void try_start_link(struct hfi1_pportdata *ppd)
9629{
9630 if (test_qsfp_read(ppd)) {
9631 /* read failed */
9632 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9633 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9634 return;
9635 }
9636 dd_dev_info(ppd->dd,
9637 "QSFP not responding, waiting and retrying %d\n",
9638 (int)ppd->qsfp_retry_count);
9639 ppd->qsfp_retry_count++;
9640 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
9641 msecs_to_jiffies(QSFP_RETRY_WAIT));
9642 return;
9643 }
9644 ppd->qsfp_retry_count = 0;
9645
Dean Luick673b9752016-08-31 07:24:33 -07009646 start_link(ppd);
9647}
9648
9649/*
9650 * Workqueue function to start the link after a delay.
9651 */
9652void handle_start_link(struct work_struct *work)
9653{
9654 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9655 start_link_work.work);
9656 try_start_link(ppd);
9657}
9658
Mike Marciniszyn77241052015-07-30 15:17:43 -04009659int bringup_serdes(struct hfi1_pportdata *ppd)
9660{
9661 struct hfi1_devdata *dd = ppd->dd;
9662 u64 guid;
9663 int ret;
9664
9665 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9666 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9667
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009668 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
Mike Marciniszyn77241052015-07-30 15:17:43 -04009669 if (!guid) {
9670 if (dd->base_guid)
9671 guid = dd->base_guid + ppd->port - 1;
Jakub Pawlaka6cd5f02016-10-17 04:19:30 -07009672 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009673 }
9674
Mike Marciniszyn77241052015-07-30 15:17:43 -04009675 /* Set linkinit_reason on power up per OPA spec */
9676 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9677
Dean Luickbbdeb332015-12-01 15:38:15 -05009678 /* one-time init of the LCB */
9679 init_lcb(dd);
9680
Mike Marciniszyn77241052015-07-30 15:17:43 -04009681 if (loopback) {
9682 ret = init_loopback(dd);
9683 if (ret < 0)
9684 return ret;
9685 }
9686
Easwar Hariharan9775a992016-05-12 10:22:39 -07009687 get_port_type(ppd);
9688 if (ppd->port_type == PORT_TYPE_QSFP) {
9689 set_qsfp_int_n(ppd, 0);
9690 wait_for_qsfp_init(ppd);
9691 set_qsfp_int_n(ppd, 1);
9692 }
9693
Dean Luick673b9752016-08-31 07:24:33 -07009694 try_start_link(ppd);
9695 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009696}
9697
9698void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9699{
9700 struct hfi1_devdata *dd = ppd->dd;
9701
9702 /*
9703	 * Shut down the link and keep it down. First clear the flag that says
9704	 * the driver wants to allow the link to be up (driver_link_ready).
9705 * Then make sure the link is not automatically restarted
9706 * (link_enabled). Cancel any pending restart. And finally
9707 * go offline.
9708 */
9709 ppd->driver_link_ready = 0;
9710 ppd->link_enabled = 0;
9711
Dean Luick673b9752016-08-31 07:24:33 -07009712 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9713 flush_delayed_work(&ppd->start_link_work);
9714 cancel_delayed_work_sync(&ppd->start_link_work);
9715
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -08009716 ppd->offline_disabled_reason =
9717 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009718 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
Jubin John17fb4f22016-02-14 20:21:52 -08009719 OPA_LINKDOWN_REASON_SMA_DISABLED);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009720 set_link_state(ppd, HLS_DN_OFFLINE);
9721
9722 /* disable the port */
9723 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9724}
9725
9726static inline int init_cpu_counters(struct hfi1_devdata *dd)
9727{
9728 struct hfi1_pportdata *ppd;
9729 int i;
9730
9731 ppd = (struct hfi1_pportdata *)(dd + 1);
9732 for (i = 0; i < dd->num_pports; i++, ppd++) {
Dennis Dalessandro4eb06882016-01-19 14:42:39 -08009733 ppd->ibport_data.rvp.rc_acks = NULL;
9734 ppd->ibport_data.rvp.rc_qacks = NULL;
9735 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9736 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9737 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9738 if (!ppd->ibport_data.rvp.rc_acks ||
9739 !ppd->ibport_data.rvp.rc_delayed_comp ||
9740 !ppd->ibport_data.rvp.rc_qacks)
Mike Marciniszyn77241052015-07-30 15:17:43 -04009741 return -ENOMEM;
9742 }
9743
9744 return 0;
9745}
9746
9747static const char * const pt_names[] = {
9748 "expected",
9749 "eager",
9750 "invalid"
9751};
9752
9753static const char *pt_name(u32 type)
9754{
9755 return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9756}
9757
9758/*
9759 * index is the index into the receive array
9760 */
9761void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9762 u32 type, unsigned long pa, u16 order)
9763{
9764 u64 reg;
9765 void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9766 (dd->kregbase + RCV_ARRAY));
9767
9768 if (!(dd->flags & HFI1_PRESENT))
9769 goto done;
9770
9771 if (type == PT_INVALID) {
9772 pa = 0;
9773 } else if (type > PT_INVALID) {
9774 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -08009775 "unexpected receive array type %u for index %u, not handled\n",
9776 type, index);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009777 goto done;
9778 }
9779
9780 hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9781 pt_name(type), index, pa, (unsigned long)order);
9782
9783#define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9784 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9785 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9786 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9787 << RCV_ARRAY_RT_ADDR_SHIFT;
9788 writeq(reg, base + (index * 8));
9789
9790 if (type == PT_EAGER)
9791 /*
9792 * Eager entries are written one-by-one so we have to push them
9793 * after we write the entry.
9794 */
9795 flush_wc();
9796done:
9797 return;
9798}
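/*
 * Illustrative example (not part of the driver): for an eager entry such
 * as hfi1_put_tid(dd, index, PT_EAGER, pa, order) with pa = 0x12345000,
 * the receive array register built above packs the write-enable bit, the
 * buffer size order, and the 4 KB-aligned address
 * (0x12345000 >> RT_ADDR_SHIFT = 0x12345) into their RCV_ARRAY_RT_* fields
 * before the writeq().  The example address is hypothetical.
 */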
9799
9800void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9801{
9802 struct hfi1_devdata *dd = rcd->dd;
9803 u32 i;
9804
9805 /* this could be optimized */
9806 for (i = rcd->eager_base; i < rcd->eager_base +
9807 rcd->egrbufs.alloced; i++)
9808 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9809
9810 for (i = rcd->expected_base;
9811 i < rcd->expected_base + rcd->expected_count; i++)
9812 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9813}
9814
Mike Marciniszyn77241052015-07-30 15:17:43 -04009815static const char * const ib_cfg_name_strings[] = {
9816 "HFI1_IB_CFG_LIDLMC",
9817 "HFI1_IB_CFG_LWID_DG_ENB",
9818 "HFI1_IB_CFG_LWID_ENB",
9819 "HFI1_IB_CFG_LWID",
9820 "HFI1_IB_CFG_SPD_ENB",
9821 "HFI1_IB_CFG_SPD",
9822 "HFI1_IB_CFG_RXPOL_ENB",
9823 "HFI1_IB_CFG_LREV_ENB",
9824 "HFI1_IB_CFG_LINKLATENCY",
9825 "HFI1_IB_CFG_HRTBT",
9826 "HFI1_IB_CFG_OP_VLS",
9827 "HFI1_IB_CFG_VL_HIGH_CAP",
9828 "HFI1_IB_CFG_VL_LOW_CAP",
9829 "HFI1_IB_CFG_OVERRUN_THRESH",
9830 "HFI1_IB_CFG_PHYERR_THRESH",
9831 "HFI1_IB_CFG_LINKDEFAULT",
9832 "HFI1_IB_CFG_PKEYS",
9833 "HFI1_IB_CFG_MTU",
9834 "HFI1_IB_CFG_LSTATE",
9835 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9836 "HFI1_IB_CFG_PMA_TICKS",
9837 "HFI1_IB_CFG_PORT"
9838};
9839
9840static const char *ib_cfg_name(int which)
9841{
9842 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9843 return "invalid";
9844 return ib_cfg_name_strings[which];
9845}
9846
9847int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9848{
9849 struct hfi1_devdata *dd = ppd->dd;
9850 int val = 0;
9851
9852 switch (which) {
9853 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9854 val = ppd->link_width_enabled;
9855 break;
9856 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9857 val = ppd->link_width_active;
9858 break;
9859 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9860 val = ppd->link_speed_enabled;
9861 break;
9862 case HFI1_IB_CFG_SPD: /* current Link speed */
9863 val = ppd->link_speed_active;
9864 break;
9865
9866 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9867 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9868 case HFI1_IB_CFG_LINKLATENCY:
9869 goto unimplemented;
9870
9871 case HFI1_IB_CFG_OP_VLS:
9872 val = ppd->vls_operational;
9873 break;
9874 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9875 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9876 break;
9877 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9878 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9879 break;
9880 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9881 val = ppd->overrun_threshold;
9882 break;
9883 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9884 val = ppd->phy_error_threshold;
9885 break;
9886 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9887 val = dd->link_default;
9888 break;
9889
9890 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9891 case HFI1_IB_CFG_PMA_TICKS:
9892 default:
9893unimplemented:
9894 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9895 dd_dev_info(
9896 dd,
9897 "%s: which %s: not implemented\n",
9898 __func__,
9899 ib_cfg_name(which));
9900 break;
9901 }
9902
9903 return val;
9904}
9905
9906/*
9907 * The largest MAD packet size.
9908 */
9909#define MAX_MAD_PACKET 2048
9910
9911/*
9912 * Return the maximum header bytes that can go on the _wire_
9913 * for this device. This count includes the ICRC which is
9914	 * not part of the packet held in memory but is appended
9915 * by the HW.
9916 * This is dependent on the device's receive header entry size.
9917 * HFI allows this to be set per-receive context, but the
9918 * driver presently enforces a global value.
9919 */
9920u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9921{
9922 /*
9923 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9924 * the Receive Header Entry Size minus the PBC (or RHF) size
9925 * plus one DW for the ICRC appended by HW.
9926 *
9927 * dd->rcd[0].rcvhdrqentsize is in DW.
9928 * We use rcd[0] as all context will have the same value. Also,
9929 * the first kernel context would have been allocated by now so
9930 * we are guaranteed a valid value.
9931 */
9932 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9933}
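/*
 * Worked example (illustrative, assuming a receive header entry size of
 * 32 DW): lrh_max_header_bytes() returns (32 - 2 + 1) << 2 = 124 bytes,
 * i.e. the PBC/RHF DWs are dropped, one DW is added for the HW-appended
 * ICRC, and the DW count is converted to bytes.
 */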
9934
9935/*
9936 * Set Send Length
9937 * @ppd - per port data
9938 *
9939 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
9940 * registers compare against LRH.PktLen, so use the max bytes included
9941 * in the LRH.
9942 *
9943 * This routine changes all VL values except VL15, which it maintains at
9944 * the same value.
9945 */
9946static void set_send_length(struct hfi1_pportdata *ppd)
9947{
9948 struct hfi1_devdata *dd = ppd->dd;
Harish Chegondi6cc6ad22015-12-01 15:38:24 -05009949 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9950 u32 maxvlmtu = dd->vld[15].mtu;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009951 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9952 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9953 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
Jubin Johnb4ba6632016-06-09 07:51:08 -07009954 int i, j;
Jianxin Xiong44306f12016-04-12 11:30:28 -07009955 u32 thres;
Mike Marciniszyn77241052015-07-30 15:17:43 -04009956
9957 for (i = 0; i < ppd->vls_supported; i++) {
9958 if (dd->vld[i].mtu > maxvlmtu)
9959 maxvlmtu = dd->vld[i].mtu;
9960 if (i <= 3)
9961 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9962 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9963 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9964 else
9965 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9966 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9967 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9968 }
9969 write_csr(dd, SEND_LEN_CHECK0, len1);
9970 write_csr(dd, SEND_LEN_CHECK1, len2);
9971 /* adjust kernel credit return thresholds based on new MTUs */
9972 /* all kernel receive contexts have the same hdrqentsize */
9973 for (i = 0; i < ppd->vls_supported; i++) {
Jianxin Xiong44306f12016-04-12 11:30:28 -07009974 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9975 sc_mtu_to_threshold(dd->vld[i].sc,
9976 dd->vld[i].mtu,
Jubin John17fb4f22016-02-14 20:21:52 -08009977 dd->rcd[0]->rcvhdrqentsize));
Jubin Johnb4ba6632016-06-09 07:51:08 -07009978 for (j = 0; j < INIT_SC_PER_VL; j++)
9979 sc_set_cr_threshold(
9980 pio_select_send_context_vl(dd, j, i),
9981 thres);
Jianxin Xiong44306f12016-04-12 11:30:28 -07009982 }
9983 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9984 sc_mtu_to_threshold(dd->vld[15].sc,
9985 dd->vld[15].mtu,
9986 dd->rcd[0]->rcvhdrqentsize));
9987 sc_set_cr_threshold(dd->vld[15].sc, thres);
Mike Marciniszyn77241052015-07-30 15:17:43 -04009988
9989 /* Adjust maximum MTU for the port in DC */
9990 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9991 (ilog2(maxvlmtu >> 8) + 1);
9992 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9993 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9994 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9995 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9996 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9997}
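/*
 * Worked example for the DC MTU cap computed above (illustrative only):
 * with maxvlmtu = 4096, dcmtu = ilog2(4096 >> 8) + 1 = ilog2(16) + 1 = 5,
 * and with maxvlmtu = 2048 it is ilog2(8) + 1 = 4.  A 10240-byte MTU is
 * special-cased to DCC_CFG_PORT_MTU_CAP_10240 rather than using this
 * formula.
 */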
9998
9999static void set_lidlmc(struct hfi1_pportdata *ppd)
10000{
10001 int i;
10002 u64 sreg = 0;
10003 struct hfi1_devdata *dd = ppd->dd;
10004 u32 mask = ~((1U << ppd->lmc) - 1);
10005 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10006
Mike Marciniszyn77241052015-07-30 15:17:43 -040010007 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10008 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10009 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
Jubin John8638b772016-02-14 20:19:24 -080010010 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
Mike Marciniszyn77241052015-07-30 15:17:43 -040010011 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10012 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10013 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10014
10015 /*
10016 * Iterate over all the send contexts and set their SLID check
10017 */
10018 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10019 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10020 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10021 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10022
10023 for (i = 0; i < dd->chip_send_contexts; i++) {
10024 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10025 i, (u32)sreg);
10026 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10027 }
10028
10029 /* Now we have to do the same thing for the sdma engines */
10030 sdma_update_lmc(dd, mask, ppd->lid);
10031}
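/*
 * Worked example for the LMC mask above (illustrative only): with
 * ppd->lmc = 2, mask = ~((1U << 2) - 1) = 0xfffffffc, so the DLID/SLID
 * checks ignore the low two LID bits and a base LID of 0x10 matches
 * LIDs 0x10 through 0x13.
 */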
10032
Dean Luick6854c692016-07-25 13:38:56 -070010033static const char *state_completed_string(u32 completed)
10034{
10035 static const char * const state_completed[] = {
10036 "EstablishComm",
10037 "OptimizeEQ",
10038 "VerifyCap"
10039 };
10040
10041 if (completed < ARRAY_SIZE(state_completed))
10042 return state_completed[completed];
10043
10044 return "unknown";
10045}
10046
10047static const char all_lanes_dead_timeout_expired[] =
10048 "All lanes were inactive – was the interconnect media removed?";
10049static const char tx_out_of_policy[] =
10050 "Passing lanes on local port do not meet the local link width policy";
10051static const char no_state_complete[] =
10052 "State timeout occurred before link partner completed the state";
10053static const char * const state_complete_reasons[] = {
10054 [0x00] = "Reason unknown",
10055 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10056 [0x02] = "Link partner reported failure",
10057 [0x10] = "Unable to achieve frame sync on any lane",
10058 [0x11] =
10059 "Unable to find a common bit rate with the link partner",
10060 [0x12] =
10061 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10062 [0x13] =
10063 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10064 [0x14] = no_state_complete,
10065 [0x15] =
10066 "State timeout occurred before link partner identified equalization presets",
10067 [0x16] =
10068 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10069 [0x17] = tx_out_of_policy,
10070 [0x20] = all_lanes_dead_timeout_expired,
10071 [0x21] =
10072 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10073 [0x22] = no_state_complete,
10074 [0x23] =
10075 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10076 [0x24] = tx_out_of_policy,
10077 [0x30] = all_lanes_dead_timeout_expired,
10078 [0x31] =
10079 "State timeout occurred waiting for host to process received frames",
10080 [0x32] = no_state_complete,
10081 [0x33] =
10082 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10083 [0x34] = tx_out_of_policy,
10084};
10085
10086static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10087 u32 code)
10088{
10089 const char *str = NULL;
10090
10091 if (code < ARRAY_SIZE(state_complete_reasons))
10092 str = state_complete_reasons[code];
10093
10094 if (str)
10095 return str;
10096 return "Reserved";
10097}
10098
10099/* describe the given last state complete frame */
10100static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10101 const char *prefix)
10102{
10103 struct hfi1_devdata *dd = ppd->dd;
10104 u32 success;
10105 u32 state;
10106 u32 reason;
10107 u32 lanes;
10108
10109 /*
10110 * Decode frame:
10111 * [ 0: 0] - success
10112 * [ 3: 1] - state
10113 * [ 7: 4] - next state timeout
10114 * [15: 8] - reason code
10115 * [31:16] - lanes
10116 */
10117 success = frame & 0x1;
10118 state = (frame >> 1) & 0x7;
10119 reason = (frame >> 8) & 0xff;
10120 lanes = (frame >> 16) & 0xffff;
10121
10122 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10123 prefix, frame);
10124 dd_dev_err(dd, " last reported state state: %s (0x%x)\n",
10125 state_completed_string(state), state);
10126 dd_dev_err(dd, " state successfully completed: %s\n",
10127 success ? "yes" : "no");
10128 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10129 reason, state_complete_reason_code_string(ppd, reason));
10130 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10131}
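/*
 * Worked decode (illustrative only, hypothetical frame value): for
 * frame = 0x00032102 the extraction above gives success = 0 ("no"),
 * state = (frame >> 1) & 0x7 = 1 ("OptimizeEQ"), reason = 0x21
 * ("Unable to achieve acceptable BER on sufficient lanes to meet the
 * local link width policy"), and lanes = 0x0003, i.e. only lanes 0-1
 * passing.
 */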
10132
10133/*
10134 * Read the last state complete frames and explain them. This routine
10135 * expects to be called if the link went down during link negotiation
10136 * and initialization (LNI). That is, anywhere between polling and link up.
10137 */
10138static void check_lni_states(struct hfi1_pportdata *ppd)
10139{
10140 u32 last_local_state;
10141 u32 last_remote_state;
10142
10143 read_last_local_state(ppd->dd, &last_local_state);
10144 read_last_remote_state(ppd->dd, &last_remote_state);
10145
10146 /*
10147 * Don't report anything if there is nothing to report. A value of
10148 * 0 means the link was taken down while polling and there was no
10149 * training in-process.
10150 */
10151 if (last_local_state == 0 && last_remote_state == 0)
10152 return;
10153
10154 decode_state_complete(ppd, last_local_state, "transmitted");
10155 decode_state_complete(ppd, last_remote_state, "received");
10156}
10157
Dean Luickec8a1422017-03-20 17:24:39 -070010158/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10159static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10160{
10161 u64 reg;
10162 unsigned long timeout;
10163
10164 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10165 timeout = jiffies + msecs_to_jiffies(wait_ms);
10166 while (1) {
10167 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10168 if (reg)
10169 break;
10170 if (time_after(jiffies, timeout)) {
10171 dd_dev_err(dd,
10172 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10173 return -ETIMEDOUT;
10174 }
10175 udelay(2);
10176 }
10177 return 0;
10178}
10179
10180/* called when the logical link state is not down as it should be */
10181static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10182{
10183 struct hfi1_devdata *dd = ppd->dd;
10184
10185 /*
10186 * Bring link up in LCB loopback
10187 */
10188 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10189 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10190 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10191
10192 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10193 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10194 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10195 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10196
10197 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10198 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10199 udelay(3);
10200 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10201 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10202
10203 wait_link_transfer_active(dd, 100);
10204
10205 /*
10206 * Bring the link down again.
10207 */
10208 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10209 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10210 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10211
10212 /* call again to adjust ppd->statusp, if needed */
10213 get_logical_state(ppd);
10214}
10215
Mike Marciniszyn77241052015-07-30 15:17:43 -040010216/*
10217 * Helper for set_link_state(). Do not call except from that routine.
10218 * Expects ppd->hls_mutex to be held.
10219 *
10220 * @rem_reason value to be sent to the neighbor
10221 *
10222 * LinkDownReasons only set if transition succeeds.
10223 */
10224static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10225{
10226 struct hfi1_devdata *dd = ppd->dd;
10227 u32 pstate, previous_state;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010228 int ret;
10229 int do_transition;
10230 int do_wait;
10231
Michael J. Ruhl86884262017-03-20 17:24:51 -070010232 update_lcb_cache(dd);
10233
Mike Marciniszyn77241052015-07-30 15:17:43 -040010234 previous_state = ppd->host_link_state;
10235 ppd->host_link_state = HLS_GOING_OFFLINE;
10236 pstate = read_physical_state(dd);
10237 if (pstate == PLS_OFFLINE) {
10238 do_transition = 0; /* in right state */
10239 do_wait = 0; /* ...no need to wait */
Jakub Byczkowski02d10082017-05-04 05:13:58 -070010240 } else if ((pstate & 0xf0) == PLS_OFFLINE) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010241 do_transition = 0; /* in an offline transient state */
10242 do_wait = 1; /* ...wait for it to settle */
10243 } else {
10244 do_transition = 1; /* need to move to offline */
10245 do_wait = 1; /* ...will need to wait */
10246 }
10247
10248 if (do_transition) {
10249 ret = set_physical_link_state(dd,
Harish Chegondibf640092016-03-05 08:49:29 -080010250 (rem_reason << 8) | PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010251
10252 if (ret != HCMD_SUCCESS) {
10253 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010254 "Failed to transition to Offline link state, return %d\n",
10255 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010256 return -EINVAL;
10257 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010258 if (ppd->offline_disabled_reason ==
10259 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010260 ppd->offline_disabled_reason =
Bryan Morgana9c05e32016-02-03 14:30:49 -080010261 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010262 }
10263
10264 if (do_wait) {
10265 /* it can take a while for the link to go down */
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010266 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010267 if (ret < 0)
10268 return ret;
10269 }
10270
Mike Marciniszyn77241052015-07-30 15:17:43 -040010271 /*
10272 * Now in charge of LCB - must be after the physical state is
10273 * offline.quiet and before host_link_state is changed.
10274 */
10275 set_host_lcb_access(dd);
10276 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
Dean Luickec8a1422017-03-20 17:24:39 -070010277
10278 /* make sure the logical state is also down */
10279 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10280 if (ret)
10281 force_logical_link_state_down(ppd);
10282
Mike Marciniszyn77241052015-07-30 15:17:43 -040010283 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10284
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010285 if (ppd->port_type == PORT_TYPE_QSFP &&
10286 ppd->qsfp_info.limiting_active &&
10287 qsfp_mod_present(ppd)) {
Dean Luick765a6fa2016-03-05 08:50:06 -080010288 int ret;
10289
10290 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10291 if (ret == 0) {
10292 set_qsfp_tx(ppd, 0);
10293 release_chip_resource(dd, qsfp_resource(dd));
10294 } else {
10295 /* not fatal, but should warn */
10296 dd_dev_err(dd,
10297 "Unable to acquire lock to turn off QSFP TX\n");
10298 }
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080010299 }
10300
Mike Marciniszyn77241052015-07-30 15:17:43 -040010301 /*
10302 * The LNI has a mandatory wait time after the physical state
10303 * moves to Offline.Quiet. The wait time may be different
10304 * depending on how the link went down. The 8051 firmware
10305 * will observe the needed wait time and only move to ready
10306 * when that is completed. The largest of the quiet timeouts
Dean Luick05087f3b2015-12-01 15:38:16 -050010307 * is 6s, so wait that long and then at least 0.5s more for
10308 * other transitions, and another 0.5s for a buffer.
Mike Marciniszyn77241052015-07-30 15:17:43 -040010309 */
Dean Luick05087f3b2015-12-01 15:38:16 -050010310 ret = wait_fm_ready(dd, 7000);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010311 if (ret) {
10312 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010313 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040010314 /* state is really offline, so make it so */
10315 ppd->host_link_state = HLS_DN_OFFLINE;
10316 return ret;
10317 }
10318
10319 /*
10320 * The state is now offline and the 8051 is ready to accept host
10321 * requests.
10322 * - change our state
10323 * - notify others if we were previously in a linkup state
10324 */
10325 ppd->host_link_state = HLS_DN_OFFLINE;
10326 if (previous_state & HLS_UP) {
10327 /* went down while link was up */
10328 handle_linkup_change(dd, 0);
10329 } else if (previous_state
10330 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10331 /* went down while attempting link up */
Dean Luick6854c692016-07-25 13:38:56 -070010332 check_lni_states(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010333 }
10334
10335 /* the active link width (downgrade) is 0 on link down */
10336 ppd->link_width_active = 0;
10337 ppd->link_width_downgrade_tx_active = 0;
10338 ppd->link_width_downgrade_rx_active = 0;
10339 ppd->current_egress_rate = 0;
10340 return 0;
10341}
10342
10343/* return the link state name */
10344static const char *link_state_name(u32 state)
10345{
10346 const char *name;
10347 int n = ilog2(state);
10348 static const char * const names[] = {
10349 [__HLS_UP_INIT_BP] = "INIT",
10350 [__HLS_UP_ARMED_BP] = "ARMED",
10351 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10352 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10353 [__HLS_DN_POLL_BP] = "POLL",
10354 [__HLS_DN_DISABLE_BP] = "DISABLE",
10355 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10356 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10357 [__HLS_GOING_UP_BP] = "GOING_UP",
10358 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10359 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10360 };
10361
10362 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10363 return name ? name : "unknown";
10364}
10365
10366/* return the link state reason name */
10367static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10368{
10369 if (state == HLS_UP_INIT) {
10370 switch (ppd->linkinit_reason) {
10371 case OPA_LINKINIT_REASON_LINKUP:
10372 return "(LINKUP)";
10373 case OPA_LINKINIT_REASON_FLAPPING:
10374 return "(FLAPPING)";
10375 case OPA_LINKINIT_OUTSIDE_POLICY:
10376 return "(OUTSIDE_POLICY)";
10377 case OPA_LINKINIT_QUARANTINED:
10378 return "(QUARANTINED)";
10379 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10380 return "(INSUFIC_CAPABILITY)";
10381 default:
10382 break;
10383 }
10384 }
10385 return "";
10386}
10387
10388/*
10389 * driver_physical_state - convert the driver's notion of a port's
10390 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10391 * Return -1 (converted to a u32) to indicate error.
10392 */
10393u32 driver_physical_state(struct hfi1_pportdata *ppd)
10394{
10395 switch (ppd->host_link_state) {
10396 case HLS_UP_INIT:
10397 case HLS_UP_ARMED:
10398 case HLS_UP_ACTIVE:
10399 return IB_PORTPHYSSTATE_LINKUP;
10400 case HLS_DN_POLL:
10401 return IB_PORTPHYSSTATE_POLLING;
10402 case HLS_DN_DISABLE:
10403 return IB_PORTPHYSSTATE_DISABLED;
10404 case HLS_DN_OFFLINE:
10405 return OPA_PORTPHYSSTATE_OFFLINE;
10406 case HLS_VERIFY_CAP:
10407 return IB_PORTPHYSSTATE_POLLING;
10408 case HLS_GOING_UP:
10409 return IB_PORTPHYSSTATE_POLLING;
10410 case HLS_GOING_OFFLINE:
10411 return OPA_PORTPHYSSTATE_OFFLINE;
10412 case HLS_LINK_COOLDOWN:
10413 return OPA_PORTPHYSSTATE_OFFLINE;
10414 case HLS_DN_DOWNDEF:
10415 default:
10416 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10417 ppd->host_link_state);
10418 return -1;
10419 }
10420}
10421
10422/*
10423 * driver_logical_state - convert the driver's notion of a port's
10424 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10425 * (converted to a u32) to indicate error.
10426 */
10427u32 driver_logical_state(struct hfi1_pportdata *ppd)
10428{
Easwar Hariharan0c7f77a2016-05-12 10:22:33 -070010429 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
Mike Marciniszyn77241052015-07-30 15:17:43 -040010430 return IB_PORT_DOWN;
10431
10432 switch (ppd->host_link_state & HLS_UP) {
10433 case HLS_UP_INIT:
10434 return IB_PORT_INIT;
10435 case HLS_UP_ARMED:
10436 return IB_PORT_ARMED;
10437 case HLS_UP_ACTIVE:
10438 return IB_PORT_ACTIVE;
10439 default:
10440 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10441 ppd->host_link_state);
10442 return -1;
10443 }
10444}
10445
10446void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10447 u8 neigh_reason, u8 rem_reason)
10448{
10449 if (ppd->local_link_down_reason.latest == 0 &&
10450 ppd->neigh_link_down_reason.latest == 0) {
10451 ppd->local_link_down_reason.latest = lcl_reason;
10452 ppd->neigh_link_down_reason.latest = neigh_reason;
10453 ppd->remote_link_down_reason = rem_reason;
10454 }
10455}
10456
10457/*
10458 * Change the physical and/or logical link state.
10459 *
10460 * Do not call this routine while inside an interrupt. It contains
10461 * calls to routines that can take multiple seconds to finish.
10462 *
10463 * Returns 0 on success, -errno on failure.
10464 */
10465int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10466{
10467 struct hfi1_devdata *dd = ppd->dd;
10468 struct ib_event event = {.device = NULL};
10469 int ret1, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010470 int orig_new_state, poll_bounce;
10471
10472 mutex_lock(&ppd->hls_lock);
10473
10474 orig_new_state = state;
10475 if (state == HLS_DN_DOWNDEF)
10476 state = dd->link_default;
10477
10478 /* interpret poll -> poll as a link bounce */
Jubin Johnd0d236e2016-02-14 20:20:15 -080010479 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10480 state == HLS_DN_POLL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010481
10482 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
Jubin John17fb4f22016-02-14 20:21:52 -080010483 link_state_name(ppd->host_link_state),
10484 link_state_name(orig_new_state),
10485 poll_bounce ? "(bounce) " : "",
10486 link_state_reason_name(ppd, state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010487
Mike Marciniszyn77241052015-07-30 15:17:43 -040010488 /*
10489 * If we're going to a (HLS_*) link state that implies the logical
10490 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10491 * reset is_sm_config_started to 0.
10492 */
10493 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10494 ppd->is_sm_config_started = 0;
10495
10496 /*
10497 * Do nothing if the states match. Let a poll to poll link bounce
10498 * go through.
10499 */
10500 if (ppd->host_link_state == state && !poll_bounce)
10501 goto done;
10502
10503 switch (state) {
10504 case HLS_UP_INIT:
Jubin Johnd0d236e2016-02-14 20:20:15 -080010505 if (ppd->host_link_state == HLS_DN_POLL &&
10506 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010507 /*
10508 * Quick link up jumps from polling to here.
10509 *
10510 * Whether in normal or loopback mode, the
10511 * simulator jumps from polling to link up.
10512 * Accept that here.
10513 */
Jubin John17fb4f22016-02-14 20:21:52 -080010514 /* OK */
Mike Marciniszyn77241052015-07-30 15:17:43 -040010515 } else if (ppd->host_link_state != HLS_GOING_UP) {
10516 goto unexpected;
10517 }
10518
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010519 /*
10520 * Wait for Link_Up physical state.
10521	 * Physical and Logical states should already be
10522	 * transitioned to LinkUp and LinkInit, respectively.
10523 */
10524 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10525 if (ret) {
10526 dd_dev_err(dd,
10527 "%s: physical state did not change to LINK-UP\n",
10528 __func__);
10529 break;
10530 }
10531
Mike Marciniszyn77241052015-07-30 15:17:43 -040010532 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10533 if (ret) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010534 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010535 "%s: logical state did not change to INIT\n",
10536 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010537 } else {
10538 /* clear old transient LINKINIT_REASON code */
10539 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10540 ppd->linkinit_reason =
10541 OPA_LINKINIT_REASON_LINKUP;
10542
10543 /* enable the port */
10544 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10545
10546 handle_linkup_change(dd, 1);
Stuart Summers98b9ee22017-04-09 10:16:53 -070010547 ppd->host_link_state = HLS_UP_INIT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010548 }
10549 break;
10550 case HLS_UP_ARMED:
10551 if (ppd->host_link_state != HLS_UP_INIT)
10552 goto unexpected;
10553
10554 ppd->host_link_state = HLS_UP_ARMED;
10555 set_logical_state(dd, LSTATE_ARMED);
10556 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10557 if (ret) {
10558 /* logical state didn't change, stay at init */
10559 ppd->host_link_state = HLS_UP_INIT;
10560 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010561 "%s: logical state did not change to ARMED\n",
10562 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010563 }
10564 /*
10565 * The simulator does not currently implement SMA messages,
10566 * so neighbor_normal is not set. Set it here when we first
10567 * move to Armed.
10568 */
10569 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10570 ppd->neighbor_normal = 1;
10571 break;
10572 case HLS_UP_ACTIVE:
10573 if (ppd->host_link_state != HLS_UP_ARMED)
10574 goto unexpected;
10575
10576 ppd->host_link_state = HLS_UP_ACTIVE;
10577 set_logical_state(dd, LSTATE_ACTIVE);
10578 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10579 if (ret) {
10580 /* logical state didn't change, stay at armed */
10581 ppd->host_link_state = HLS_UP_ARMED;
10582 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010583 "%s: logical state did not change to ACTIVE\n",
10584 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010585 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040010586 /* tell all engines to go running */
10587 sdma_all_running(dd);
10588
10589			/* Signal the IB layer that the port has gone active */
Dennis Dalessandroec3f2c12016-01-19 14:41:33 -080010590 event.device = &dd->verbs_dev.rdi.ibdev;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010591 event.element.port_num = ppd->port;
10592 event.event = IB_EVENT_PORT_ACTIVE;
10593 }
10594 break;
10595 case HLS_DN_POLL:
10596 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10597 ppd->host_link_state == HLS_DN_OFFLINE) &&
10598 dd->dc_shutdown)
10599 dc_start(dd);
10600 /* Hand LED control to the DC */
10601 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10602
10603 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10604 u8 tmp = ppd->link_enabled;
10605
10606 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10607 if (ret) {
10608 ppd->link_enabled = tmp;
10609 break;
10610 }
10611 ppd->remote_link_down_reason = 0;
10612
10613 if (ppd->driver_link_ready)
10614 ppd->link_enabled = 1;
10615 }
10616
Jim Snowfb9036d2016-01-11 18:32:21 -050010617 set_all_slowpath(ppd->dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010618 ret = set_local_link_attributes(ppd);
10619 if (ret)
10620 break;
10621
10622 ppd->port_error_action = 0;
10623 ppd->host_link_state = HLS_DN_POLL;
10624
10625 if (quick_linkup) {
10626 /* quick linkup does not go into polling */
10627 ret = do_quick_linkup(dd);
10628 } else {
10629 ret1 = set_physical_link_state(dd, PLS_POLLING);
10630 if (ret1 != HCMD_SUCCESS) {
10631 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010632 "Failed to transition to Polling link state, return 0x%x\n",
10633 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010634 ret = -EINVAL;
10635 }
10636 }
Bryan Morgana9c05e32016-02-03 14:30:49 -080010637 ppd->offline_disabled_reason =
10638 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010639 /*
10640 * If an error occurred above, go back to offline. The
10641 * caller may reschedule another attempt.
10642 */
10643 if (ret)
10644 goto_offline(ppd, 0);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010645 else
10646 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010647 break;
10648 case HLS_DN_DISABLE:
10649 /* link is disabled */
10650 ppd->link_enabled = 0;
10651
10652 /* allow any state to transition to disabled */
10653
10654 /* must transition to offline first */
10655 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10656 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10657 if (ret)
10658 break;
10659 ppd->remote_link_down_reason = 0;
10660 }
10661
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010662 if (!dd->dc_shutdown) {
10663 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10664 if (ret1 != HCMD_SUCCESS) {
10665 dd_dev_err(dd,
10666 "Failed to transition to Disabled link state, return 0x%x\n",
10667 ret1);
10668 ret = -EINVAL;
10669 break;
10670 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010671 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10672 if (ret) {
10673 dd_dev_err(dd,
10674 "%s: physical state did not change to DISABLED\n",
10675 __func__);
10676 break;
10677 }
Michael J. Ruhldb069ec2017-02-08 05:28:13 -080010678 dc_shutdown(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010679 }
10680 ppd->host_link_state = HLS_DN_DISABLE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010681 break;
10682 case HLS_DN_OFFLINE:
10683 if (ppd->host_link_state == HLS_DN_DISABLE)
10684 dc_start(dd);
10685
10686 /* allow any state to transition to offline */
10687 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10688 if (!ret)
10689 ppd->remote_link_down_reason = 0;
10690 break;
10691 case HLS_VERIFY_CAP:
10692 if (ppd->host_link_state != HLS_DN_POLL)
10693 goto unexpected;
10694 ppd->host_link_state = HLS_VERIFY_CAP;
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070010695 cache_physical_state(ppd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010696 break;
10697 case HLS_GOING_UP:
10698 if (ppd->host_link_state != HLS_VERIFY_CAP)
10699 goto unexpected;
10700
10701 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10702 if (ret1 != HCMD_SUCCESS) {
10703 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010704 "Failed to transition to link up state, return 0x%x\n",
10705 ret1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010706 ret = -EINVAL;
10707 break;
10708 }
10709 ppd->host_link_state = HLS_GOING_UP;
10710 break;
10711
10712 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10713 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10714 default:
10715 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010716 __func__, state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010717 ret = -EINVAL;
10718 break;
10719 }
10720
Mike Marciniszyn77241052015-07-30 15:17:43 -040010721 goto done;
10722
10723unexpected:
10724 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
Jubin John17fb4f22016-02-14 20:21:52 -080010725 __func__, link_state_name(ppd->host_link_state),
10726 link_state_name(state));
Mike Marciniszyn77241052015-07-30 15:17:43 -040010727 ret = -EINVAL;
10728
10729done:
10730 mutex_unlock(&ppd->hls_lock);
10731
10732 if (event.device)
10733 ib_dispatch_event(&event);
10734
10735 return ret;
10736}
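/*
 * Illustrative only: a hedged sketch of the ordering that set_link_state()
 * enforces, written as a hypothetical helper that is not part of the
 * driver.  In the real driver the intermediate transitions are requested
 * from link startup code, SMA processing and the link interrupt handlers
 * as the hardware and the neighbor actually progress, so a simple loop
 * like this would normally stall or fail partway; it only shows which
 * state must precede which, mirroring the "unexpected state transition"
 * checks above.
 */
static int example_link_bringup_order(struct hfi1_pportdata *ppd)
{
	static const u32 order[] = {
		HLS_DN_POLL,	/* start polling (or quick linkup) */
		HLS_VERIFY_CAP,	/* capability exchange done */
		HLS_GOING_UP,	/* request physical LinkUp */
		HLS_UP_INIT,	/* logical INIT */
		HLS_UP_ARMED,	/* logical ARMED (normally SM-driven) */
		HLS_UP_ACTIVE,	/* logical ACTIVE (normally SM-driven) */
	};
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(order); i++) {
		ret = set_link_state(ppd, order[i]);
		if (ret)
			return ret;	/* caller may retry from offline */
	}
	return 0;
}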
10737
10738int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10739{
10740 u64 reg;
10741 int ret = 0;
10742
10743 switch (which) {
10744 case HFI1_IB_CFG_LIDLMC:
10745 set_lidlmc(ppd);
10746 break;
10747 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10748 /*
10749 * The VL Arbitrator high limit is sent in units of 4k
10750 * bytes, while HFI stores it in units of 64 bytes.
10751 */
Jubin John8638b772016-02-14 20:19:24 -080010752 val *= 4096 / 64;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010753 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10754 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10755 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10756 break;
10757 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10758 /* HFI only supports POLL as the default link down state */
10759 if (val != HLS_DN_POLL)
10760 ret = -EINVAL;
10761 break;
10762 case HFI1_IB_CFG_OP_VLS:
10763 if (ppd->vls_operational != val) {
10764 ppd->vls_operational = val;
10765 if (!ppd->port)
10766 ret = -EINVAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040010767 }
10768 break;
10769 /*
10770 * For link width, link width downgrade, and speed enable, always AND
10771 * the setting with what is actually supported. This has two benefits.
10772 * First, enabled can't have unsupported values, no matter what the
10773 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10774 * "fill in with your supported value" have all the bits in the
10775 * field set, so simply ANDing with supported has the desired result.
10776 */
10777 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10778 ppd->link_width_enabled = val & ppd->link_width_supported;
10779 break;
10780 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10781 ppd->link_width_downgrade_enabled =
10782 val & ppd->link_width_downgrade_supported;
10783 break;
10784 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10785 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10786 break;
10787 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10788 /*
10789 * HFI does not follow IB specs, save this value
10790 * so we can report it, if asked.
10791 */
10792 ppd->overrun_threshold = val;
10793 break;
10794 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10795 /*
10796 * HFI does not follow IB specs, save this value
10797 * so we can report it, if asked.
10798 */
10799 ppd->phy_error_threshold = val;
10800 break;
10801
10802 case HFI1_IB_CFG_MTU:
10803 set_send_length(ppd);
10804 break;
10805
10806 case HFI1_IB_CFG_PKEYS:
10807 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10808 set_partition_keys(ppd);
10809 break;
10810
10811 default:
10812 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10813 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080010814 "%s: which %s, val 0x%x: not implemented\n",
10815 __func__, ib_cfg_name(which), val);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010816 break;
10817 }
10818 return ret;
10819}
10820
10821/* begin functions related to vl arbitration table caching */
10822static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10823{
10824 int i;
10825
10826 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10827 VL_ARB_LOW_PRIO_TABLE_SIZE);
10828 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10829 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10830
10831 /*
10832 * Note that we always return values directly from the
10833 * 'vl_arb_cache' (and do no CSR reads) in response to a
10834 * 'Get(VLArbTable)'. This is obviously correct after a
10835 * 'Set(VLArbTable)', since the cache will then be up to
10836 * date. But it's also correct prior to any 'Set(VLArbTable)'
10837 * since then both the cache, and the relevant h/w registers
10838 * will be zeroed.
10839 */
10840
10841 for (i = 0; i < MAX_PRIO_TABLE; i++)
10842 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10843}
10844
10845/*
10846 * vl_arb_lock_cache
10847 *
10848 * All other vl_arb_* functions should be called only after locking
10849 * the cache.
10850 */
10851static inline struct vl_arb_cache *
10852vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10853{
10854 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10855 return NULL;
10856 spin_lock(&ppd->vl_arb_cache[idx].lock);
10857 return &ppd->vl_arb_cache[idx];
10858}
10859
10860static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10861{
10862 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10863}
10864
10865static void vl_arb_get_cache(struct vl_arb_cache *cache,
10866 struct ib_vl_weight_elem *vl)
10867{
10868 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10869}
10870
10871static void vl_arb_set_cache(struct vl_arb_cache *cache,
10872 struct ib_vl_weight_elem *vl)
10873{
10874 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10875}
10876
10877static int vl_arb_match_cache(struct vl_arb_cache *cache,
10878 struct ib_vl_weight_elem *vl)
10879{
10880 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10881}
Jubin Johnf4d507c2016-02-14 20:20:25 -080010882
Mike Marciniszyn77241052015-07-30 15:17:43 -040010883/* end functions related to vl arbitration table caching */
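/*
 * Illustrative only: a minimal sketch of the intended lock/copy/unlock
 * pattern for the cache helpers above.  It mirrors what fm_get_table()
 * below actually does; the function name and the caller-provided 'wes'
 * buffer (VL_ARB_TABLE_SIZE elements) are assumptions for this sketch.
 */
static void example_read_low_prio_table(struct hfi1_pportdata *ppd,
					struct ib_vl_weight_elem *wes)
{
	struct vl_arb_cache *vlc;

	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
	if (!vlc)
		return;			/* invalid table index */
	vl_arb_get_cache(vlc, wes);	/* copy the cached table out */
	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
}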
10884
10885static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10886 u32 size, struct ib_vl_weight_elem *vl)
10887{
10888 struct hfi1_devdata *dd = ppd->dd;
10889 u64 reg;
10890 unsigned int i, is_up = 0;
10891 int drain, ret = 0;
10892
10893 mutex_lock(&ppd->hls_lock);
10894
10895 if (ppd->host_link_state & HLS_UP)
10896 is_up = 1;
10897
10898 drain = !is_ax(dd) && is_up;
10899
10900 if (drain)
10901 /*
10902 * Before adjusting VL arbitration weights, empty per-VL
10903 * FIFOs, otherwise a packet whose VL weight is being
10904 * set to 0 could get stuck in a FIFO with no chance to
10905 * egress.
10906 */
10907 ret = stop_drain_data_vls(dd);
10908
10909 if (ret) {
10910 dd_dev_err(
10911 dd,
10912 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10913 __func__);
10914 goto err;
10915 }
10916
10917 for (i = 0; i < size; i++, vl++) {
10918 /*
10919 * NOTE: The low priority shift and mask are used here, but
10920 * they are the same for both the low and high registers.
10921 */
10922 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10923 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10924 | (((u64)vl->weight
10925 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10926 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10927 write_csr(dd, target + (i * 8), reg);
10928 }
10929 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10930
10931 if (drain)
10932 open_fill_data_vls(dd); /* reopen all VLs */
10933
10934err:
10935 mutex_unlock(&ppd->hls_lock);
10936
10937 return ret;
10938}
10939
10940/*
10941 * Read one credit merge VL register.
10942 */
10943static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10944 struct vl_limit *vll)
10945{
10946 u64 reg = read_csr(dd, csr);
10947
10948 vll->dedicated = cpu_to_be16(
10949 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10950 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10951 vll->shared = cpu_to_be16(
10952 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10953 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10954}
10955
10956/*
10957 * Read the current credit merge limits.
10958 */
10959static int get_buffer_control(struct hfi1_devdata *dd,
10960 struct buffer_control *bc, u16 *overall_limit)
10961{
10962 u64 reg;
10963 int i;
10964
10965 /* not all entries are filled in */
10966 memset(bc, 0, sizeof(*bc));
10967
10968 /* OPA and HFI have a 1-1 mapping */
10969 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080010970 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
Mike Marciniszyn77241052015-07-30 15:17:43 -040010971
10972 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10973 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10974
10975 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10976 bc->overall_shared_limit = cpu_to_be16(
10977 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10978 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10979 if (overall_limit)
10980 *overall_limit = (reg
10981 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10982 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10983 return sizeof(struct buffer_control);
10984}
10985
10986static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10987{
10988 u64 reg;
10989 int i;
10990
10991 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10992 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10993 for (i = 0; i < sizeof(u64); i++) {
10994 u8 byte = *(((u8 *)&reg) + i);
10995
10996 dp->vlnt[2 * i] = byte & 0xf;
10997 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10998 }
10999
11000 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11001 for (i = 0; i < sizeof(u64); i++) {
11002 u8 byte = *(((u8 *)&reg) + i);
11003
11004 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11005 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11006 }
11007 return sizeof(struct sc2vlnt);
11008}
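/*
 * Illustrative only: each DCC_CFG_SC_VL_TABLE_* register packs sixteen
 * 4-bit SC->VLnt entries, two per byte, low nibble first.  This is a
 * hedged sketch of the reverse of the unpacking loop in get_sc2vlnt()
 * above; pack_sc2vlnt_low() is an assumed name, and the driver's own
 * set_sc2vlnt() below builds the register with the DC_SC_VL_VAL() macro
 * instead.
 */
static u64 pack_sc2vlnt_low(const struct sc2vlnt *dp)
{
	u64 reg = 0;
	int i;

	/* entry i lands in bits [4*i + 3 : 4*i] */
	for (i = 0; i < 16; i++)
		reg |= (u64)(dp->vlnt[i] & 0xf) << (4 * i);
	return reg;
}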
11009
11010static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11011 struct ib_vl_weight_elem *vl)
11012{
11013 unsigned int i;
11014
11015 for (i = 0; i < nelems; i++, vl++) {
11016 vl->vl = 0xf;
11017 vl->weight = 0;
11018 }
11019}
11020
11021static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11022{
11023 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
Jubin John17fb4f22016-02-14 20:21:52 -080011024 DC_SC_VL_VAL(15_0,
11025 0, dp->vlnt[0] & 0xf,
11026 1, dp->vlnt[1] & 0xf,
11027 2, dp->vlnt[2] & 0xf,
11028 3, dp->vlnt[3] & 0xf,
11029 4, dp->vlnt[4] & 0xf,
11030 5, dp->vlnt[5] & 0xf,
11031 6, dp->vlnt[6] & 0xf,
11032 7, dp->vlnt[7] & 0xf,
11033 8, dp->vlnt[8] & 0xf,
11034 9, dp->vlnt[9] & 0xf,
11035 10, dp->vlnt[10] & 0xf,
11036 11, dp->vlnt[11] & 0xf,
11037 12, dp->vlnt[12] & 0xf,
11038 13, dp->vlnt[13] & 0xf,
11039 14, dp->vlnt[14] & 0xf,
11040 15, dp->vlnt[15] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011041 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
Jubin John17fb4f22016-02-14 20:21:52 -080011042 DC_SC_VL_VAL(31_16,
11043 16, dp->vlnt[16] & 0xf,
11044 17, dp->vlnt[17] & 0xf,
11045 18, dp->vlnt[18] & 0xf,
11046 19, dp->vlnt[19] & 0xf,
11047 20, dp->vlnt[20] & 0xf,
11048 21, dp->vlnt[21] & 0xf,
11049 22, dp->vlnt[22] & 0xf,
11050 23, dp->vlnt[23] & 0xf,
11051 24, dp->vlnt[24] & 0xf,
11052 25, dp->vlnt[25] & 0xf,
11053 26, dp->vlnt[26] & 0xf,
11054 27, dp->vlnt[27] & 0xf,
11055 28, dp->vlnt[28] & 0xf,
11056 29, dp->vlnt[29] & 0xf,
11057 30, dp->vlnt[30] & 0xf,
11058 31, dp->vlnt[31] & 0xf));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011059}
11060
11061static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11062 u16 limit)
11063{
11064 if (limit != 0)
11065 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011066 what, (int)limit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011067}
11068
11069/* change only the shared limit portion of SendCmGlobalCredit */
11070static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11071{
11072 u64 reg;
11073
11074 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11075 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11076 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11077 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11078}
11079
11080/* change only the total credit limit portion of SendCmGlobalCredit */
11081static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11082{
11083 u64 reg;
11084
11085 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11086 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11087 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11088 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11089}
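/*
 * Illustrative only: set_global_shared(), set_global_limit() and the
 * set_vl_*() helpers below all follow the same read-modify-write shape.
 * A hedged, generic sketch of that shape; write_csr_field() is an assumed
 * name and is not part of the driver.
 */
static void write_csr_field(struct hfi1_devdata *dd, u32 csr,
			    u64 smask, int shift, u64 val)
{
	u64 reg = read_csr(dd, csr);	/* read the current register */

	reg &= ~smask;			/* clear the target field */
	reg |= (val << shift) & smask;	/* insert the new value */
	write_csr(dd, csr, reg);
}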
11090
11091/* set the given per-VL shared limit */
11092static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11093{
11094 u64 reg;
11095 u32 addr;
11096
11097 if (vl < TXE_NUM_DATA_VL)
11098 addr = SEND_CM_CREDIT_VL + (8 * vl);
11099 else
11100 addr = SEND_CM_CREDIT_VL15;
11101
11102 reg = read_csr(dd, addr);
11103 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11104 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11105 write_csr(dd, addr, reg);
11106}
11107
11108/* set the given per-VL dedicated limit */
11109static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11110{
11111 u64 reg;
11112 u32 addr;
11113
11114 if (vl < TXE_NUM_DATA_VL)
11115 addr = SEND_CM_CREDIT_VL + (8 * vl);
11116 else
11117 addr = SEND_CM_CREDIT_VL15;
11118
11119 reg = read_csr(dd, addr);
11120 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11121 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11122 write_csr(dd, addr, reg);
11123}
11124
11125/* spin until the given per-VL status mask bits clear */
11126static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11127 const char *which)
11128{
11129 unsigned long timeout;
11130 u64 reg;
11131
11132 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11133 while (1) {
11134 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11135
11136 if (reg == 0)
11137 return; /* success */
11138 if (time_after(jiffies, timeout))
11139 break; /* timed out */
11140 udelay(1);
11141 }
11142
11143 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011144 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11145 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011146 /*
11147 * If this occurs, it is likely there was a credit loss on the link.
11148 * The only recovery from that is a link bounce.
11149 */
11150 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011151 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011152}
11153
11154/*
11155 * The number of credits on the VLs may be changed while everything
11156 * is "live", but the following algorithm must be followed due to
11157 * how the hardware is actually implemented. In particular,
11158 * Return_Credit_Status[] is the only correct status check.
11159 *
11160 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11161 * set Global_Shared_Credit_Limit = 0
11162 * use_all_vl = 1
11163 * mask0 = all VLs that are changing either dedicated or shared limits
11164 * set Shared_Limit[mask0] = 0
11165 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11166 * if (changing any dedicated limit)
11167 * mask1 = all VLs that are lowering dedicated limits
11168 * lower Dedicated_Limit[mask1]
11169 * spin until Return_Credit_Status[mask1] == 0
11170 * raise Dedicated_Limits
11171 * raise Shared_Limits
11172 * raise Global_Shared_Credit_Limit
11173 *
11174 * lower = if the new limit is lower, set the limit to the new value
11175 * raise = if the new limit is higher than the current value (may be changed
11176 *     earlier in the algorithm), set the limit to the new value
11177 */
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011178int set_buffer_control(struct hfi1_pportdata *ppd,
11179 struct buffer_control *new_bc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011180{
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011181 struct hfi1_devdata *dd = ppd->dd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011182 u64 changing_mask, ld_mask, stat_mask;
11183 int change_count;
11184 int i, use_all_mask;
11185 int this_shared_changing;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011186 int vl_count = 0, ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011187 /*
11188 * A0: add the variable any_shared_limit_changing below and in the
11189 * algorithm above. If removing A0 support, it can be removed.
11190 */
11191 int any_shared_limit_changing;
11192 struct buffer_control cur_bc;
11193 u8 changing[OPA_MAX_VLS];
11194 u8 lowering_dedicated[OPA_MAX_VLS];
11195 u16 cur_total;
11196 u32 new_total = 0;
11197 const u64 all_mask =
11198 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11199 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11200 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11201 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11202 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11203 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11204 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11205 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11206 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11207
11208#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11209#define NUM_USABLE_VLS 16 /* look at VL15 and less */
11210
Mike Marciniszyn77241052015-07-30 15:17:43 -040011211 /* find the new total credits, do sanity check on unused VLs */
11212 for (i = 0; i < OPA_MAX_VLS; i++) {
11213 if (valid_vl(i)) {
11214 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11215 continue;
11216 }
11217 nonzero_msg(dd, i, "dedicated",
Jubin John17fb4f22016-02-14 20:21:52 -080011218 be16_to_cpu(new_bc->vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011219 nonzero_msg(dd, i, "shared",
Jubin John17fb4f22016-02-14 20:21:52 -080011220 be16_to_cpu(new_bc->vl[i].shared));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011221 new_bc->vl[i].dedicated = 0;
11222 new_bc->vl[i].shared = 0;
11223 }
11224 new_total += be16_to_cpu(new_bc->overall_shared_limit);
Dean Luickbff14bb2015-12-17 19:24:13 -050011225
Mike Marciniszyn77241052015-07-30 15:17:43 -040011226 /* fetch the current values */
11227 get_buffer_control(dd, &cur_bc, &cur_total);
11228
11229 /*
11230 * Create the masks we will use.
11231 */
11232 memset(changing, 0, sizeof(changing));
11233 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
Jubin John4d114fd2016-02-14 20:21:43 -080011234 /*
11235 * NOTE: Assumes that the individual VL bits are adjacent and in
11236 * increasing order
11237 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011238 stat_mask =
11239 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11240 changing_mask = 0;
11241 ld_mask = 0;
11242 change_count = 0;
11243 any_shared_limit_changing = 0;
11244 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11245 if (!valid_vl(i))
11246 continue;
11247 this_shared_changing = new_bc->vl[i].shared
11248 != cur_bc.vl[i].shared;
11249 if (this_shared_changing)
11250 any_shared_limit_changing = 1;
Jubin Johnd0d236e2016-02-14 20:20:15 -080011251 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11252 this_shared_changing) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011253 changing[i] = 1;
11254 changing_mask |= stat_mask;
11255 change_count++;
11256 }
11257 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11258 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11259 lowering_dedicated[i] = 1;
11260 ld_mask |= stat_mask;
11261 }
11262 }
11263
11264 /* bracket the credit change with a total adjustment */
11265 if (new_total > cur_total)
11266 set_global_limit(dd, new_total);
11267
11268 /*
11269 * Start the credit change algorithm.
11270 */
11271 use_all_mask = 0;
11272 if ((be16_to_cpu(new_bc->overall_shared_limit) <
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011273 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11274 (is_ax(dd) && any_shared_limit_changing)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011275 set_global_shared(dd, 0);
11276 cur_bc.overall_shared_limit = 0;
11277 use_all_mask = 1;
11278 }
11279
11280 for (i = 0; i < NUM_USABLE_VLS; i++) {
11281 if (!valid_vl(i))
11282 continue;
11283
11284 if (changing[i]) {
11285 set_vl_shared(dd, i, 0);
11286 cur_bc.vl[i].shared = 0;
11287 }
11288 }
11289
11290 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
Jubin John17fb4f22016-02-14 20:21:52 -080011291 "shared");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011292
11293 if (change_count > 0) {
11294 for (i = 0; i < NUM_USABLE_VLS; i++) {
11295 if (!valid_vl(i))
11296 continue;
11297
11298 if (lowering_dedicated[i]) {
11299 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011300 be16_to_cpu(new_bc->
11301 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011302 cur_bc.vl[i].dedicated =
11303 new_bc->vl[i].dedicated;
11304 }
11305 }
11306
11307 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11308
11309 /* now raise all dedicated that are going up */
11310 for (i = 0; i < NUM_USABLE_VLS; i++) {
11311 if (!valid_vl(i))
11312 continue;
11313
11314 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11315 be16_to_cpu(cur_bc.vl[i].dedicated))
11316 set_vl_dedicated(dd, i,
Jubin John17fb4f22016-02-14 20:21:52 -080011317 be16_to_cpu(new_bc->
11318 vl[i].dedicated));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011319 }
11320 }
11321
11322 /* next raise all shared that are going up */
11323 for (i = 0; i < NUM_USABLE_VLS; i++) {
11324 if (!valid_vl(i))
11325 continue;
11326
11327 if (be16_to_cpu(new_bc->vl[i].shared) >
11328 be16_to_cpu(cur_bc.vl[i].shared))
11329 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11330 }
11331
11332 /* finally raise the global shared */
11333 if (be16_to_cpu(new_bc->overall_shared_limit) >
Jubin John17fb4f22016-02-14 20:21:52 -080011334 be16_to_cpu(cur_bc.overall_shared_limit))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011335 set_global_shared(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080011336 be16_to_cpu(new_bc->overall_shared_limit));
Mike Marciniszyn77241052015-07-30 15:17:43 -040011337
11338 /* bracket the credit change with a total adjustment */
11339 if (new_total < cur_total)
11340 set_global_limit(dd, new_total);
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011341
11342 /*
11343 * Determine the actual number of operational VLS using the number of
11344 * dedicated and shared credits for each VL.
11345 */
11346 if (change_count > 0) {
11347 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11348 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11349 be16_to_cpu(new_bc->vl[i].shared) > 0)
11350 vl_count++;
11351 ppd->actual_vls_operational = vl_count;
11352 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11353 ppd->actual_vls_operational :
11354 ppd->vls_operational,
11355 NULL);
11356 if (ret == 0)
11357 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11358 ppd->actual_vls_operational :
11359 ppd->vls_operational, NULL);
11360 if (ret)
11361 return ret;
11362 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011363 return 0;
11364}
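/*
 * Worked example (illustrative only) of the "bracket the credit change
 * with a total adjustment" steps in set_buffer_control() above: when the
 * new per-VL and shared credits sum to more than the current total, the
 * global TOTAL_CREDIT_LIMIT is raised before any per-VL limit, and when
 * the new sum is smaller it is lowered only after every per-VL limit has
 * been reduced -- presumably so the global limit never drops below what
 * the individual limits still allow during the transition.
 */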
11365
11366/*
11367 * Read the given fabric manager table. Return the size of the
11368 * table (in bytes) on success, and a negative error code on
11369 * failure.
11370 */
11371int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11372
11373{
11374 int size;
11375 struct vl_arb_cache *vlc;
11376
11377 switch (which) {
11378 case FM_TBL_VL_HIGH_ARB:
11379 size = 256;
11380 /*
11381 * OPA specifies 128 elements (of 2 bytes each), though
11382 * HFI supports only 16 elements in h/w.
11383 */
11384 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11385 vl_arb_get_cache(vlc, t);
11386 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11387 break;
11388 case FM_TBL_VL_LOW_ARB:
11389 size = 256;
11390 /*
11391 * OPA specifies 128 elements (of 2 bytes each), though
11392 * HFI supports only 16 elements in h/w.
11393 */
11394 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11395 vl_arb_get_cache(vlc, t);
11396 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11397 break;
11398 case FM_TBL_BUFFER_CONTROL:
11399 size = get_buffer_control(ppd->dd, t, NULL);
11400 break;
11401 case FM_TBL_SC2VLNT:
11402 size = get_sc2vlnt(ppd->dd, t);
11403 break;
11404 case FM_TBL_VL_PREEMPT_ELEMS:
11405 size = 256;
11406 /* OPA specifies 128 elements, of 2 bytes each */
11407 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11408 break;
11409 case FM_TBL_VL_PREEMPT_MATRIX:
11410 size = 256;
11411 /*
11412 * OPA specifies that this is the same size as the VL
11413 * arbitration tables (i.e., 256 bytes).
11414 */
11415 break;
11416 default:
11417 return -EINVAL;
11418 }
11419 return size;
11420}
11421
11422/*
11423 * Write the given fabric manager table.
11424 */
11425int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11426{
11427 int ret = 0;
11428 struct vl_arb_cache *vlc;
11429
11430 switch (which) {
11431 case FM_TBL_VL_HIGH_ARB:
11432 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11433 if (vl_arb_match_cache(vlc, t)) {
11434 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11435 break;
11436 }
11437 vl_arb_set_cache(vlc, t);
11438 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11439 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11440 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11441 break;
11442 case FM_TBL_VL_LOW_ARB:
11443 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11444 if (vl_arb_match_cache(vlc, t)) {
11445 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11446 break;
11447 }
11448 vl_arb_set_cache(vlc, t);
11449 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11450 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11451 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11452 break;
11453 case FM_TBL_BUFFER_CONTROL:
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080011454 ret = set_buffer_control(ppd, t);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011455 break;
11456 case FM_TBL_SC2VLNT:
11457 set_sc2vlnt(ppd->dd, t);
11458 break;
11459 default:
11460 ret = -EINVAL;
11461 }
11462 return ret;
11463}
11464
11465/*
11466 * Disable all data VLs.
11467 *
11468 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11469 */
11470static int disable_data_vls(struct hfi1_devdata *dd)
11471{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011472 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011473 return 1;
11474
11475 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11476
11477 return 0;
11478}
11479
11480/*
11481 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11482 * Just re-enables all data VLs (the "fill" part happens
11483 * automatically - the name was chosen for symmetry with
11484 * stop_drain_data_vls()).
11485 *
11486 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11487 */
11488int open_fill_data_vls(struct hfi1_devdata *dd)
11489{
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050011490 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040011491 return 1;
11492
11493 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11494
11495 return 0;
11496}
11497
11498/*
11499 * drain_data_vls() - assumes that disable_data_vls() has been called,
11500 * then waits for the occupancy of the per-VL FIFOs (for all contexts)
11501 * and of the SDMA engines to drop to 0.
11502 */
11503static void drain_data_vls(struct hfi1_devdata *dd)
11504{
11505 sc_wait(dd);
11506 sdma_wait(dd);
11507 pause_for_credit_return(dd);
11508}
11509
11510/*
11511 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11512 *
11513 * Use open_fill_data_vls() to resume using data VLs. This pair is
11514 * meant to be used like this:
11515 *
11516 * stop_drain_data_vls(dd);
11517 * // do things with per-VL resources
11518 * open_fill_data_vls(dd);
11519 */
11520int stop_drain_data_vls(struct hfi1_devdata *dd)
11521{
11522 int ret;
11523
11524 ret = disable_data_vls(dd);
11525 if (ret == 0)
11526 drain_data_vls(dd);
11527
11528 return ret;
11529}
11530
11531/*
11532 * Convert a nanosecond time to a cclock count. No matter how slow
11533 * the cclock, a non-zero ns will always have a non-zero result.
11534 */
11535u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11536{
11537 u32 cclocks;
11538
11539 if (dd->icode == ICODE_FPGA_EMULATION)
11540 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11541 else /* simulation pretends to be ASIC */
11542 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11543 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11544 cclocks = 1;
11545 return cclocks;
11546}
11547
11548/*
11549 * Convert a cclock count to nanoseconds. No matter how slow
11550 * the cclock, a non-zero cclocks will always have a non-zero result.
11551 */
11552u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11553{
11554 u32 ns;
11555
11556 if (dd->icode == ICODE_FPGA_EMULATION)
11557 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11558 else /* simulation pretends to be ASIC */
11559 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11560 if (cclocks && !ns)
11561 ns = 1;
11562 return ns;
11563}
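/*
 * Worked example (illustrative only) of the two conversions above, using
 * a made-up period of 2000 ps per cclock (not the real FPGA_CCLOCK_PS or
 * ASIC_CCLOCK_PS values): ns_to_cclock(dd, 1) computes (1 * 1000) / 2000
 * = 0, which the final check bumps up to 1, so a non-zero ns always maps
 * to a non-zero cclock count; cclock_to_ns(dd, 1) computes
 * (1 * 2000) / 1000 = 2.
 */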
11564
11565/*
11566 * Dynamically adjust the receive interrupt timeout for a context based on
11567 * incoming packet rate.
11568 *
11569 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11570 */
11571static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11572{
11573 struct hfi1_devdata *dd = rcd->dd;
11574 u32 timeout = rcd->rcvavail_timeout;
11575
11576 /*
11577 * This algorithm doubles or halves the timeout depending on whether
11578	 * the number of packets received in this interrupt was less than or
11579	 * greater than or equal to the interrupt count.
11580 *
11581 * The calculations below do not allow a steady state to be achieved.
11582	 * Only at the endpoints is it possible to have an unchanging
11583 * timeout.
11584 */
11585 if (npkts < rcv_intr_count) {
11586 /*
11587 * Not enough packets arrived before the timeout, adjust
11588 * timeout downward.
11589 */
11590 if (timeout < 2) /* already at minimum? */
11591 return;
11592 timeout >>= 1;
11593 } else {
11594 /*
11595 * More than enough packets arrived before the timeout, adjust
11596 * timeout upward.
11597 */
11598 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11599 return;
11600 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11601 }
11602
11603 rcd->rcvavail_timeout = timeout;
Jubin John4d114fd2016-02-14 20:21:43 -080011604 /*
11605 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11606 * been verified to be in range
11607 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011608 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011609 (u64)timeout <<
11610 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011611}
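/*
 * Worked example (illustrative only) of adjust_rcv_timeout() above: with
 * a current timeout of 8, a slow context (npkts < rcv_intr_count on every
 * interrupt) steps 8 -> 4 -> 2 -> 1 and then holds, while a busy context
 * (npkts >= rcv_intr_count) steps 8 -> 16 -> 32 -> ... until capped at
 * dd->rcv_intr_timeout_csr.  As noted above, values in between never
 * reach a steady state.
 */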
11612
11613void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11614 u32 intr_adjust, u32 npkts)
11615{
11616 struct hfi1_devdata *dd = rcd->dd;
11617 u64 reg;
11618 u32 ctxt = rcd->ctxt;
11619
11620 /*
11621 * Need to write timeout register before updating RcvHdrHead to ensure
11622 * that a new value is used when the HW decides to restart counting.
11623 */
11624 if (intr_adjust)
11625 adjust_rcv_timeout(rcd, npkts);
11626 if (updegr) {
11627 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11628 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11629 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11630 }
11631 mmiowb();
11632 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11633 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11634 << RCV_HDR_HEAD_HEAD_SHIFT);
11635 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11636 mmiowb();
11637}
11638
11639u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11640{
11641 u32 head, tail;
11642
11643 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11644 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11645
11646 if (rcd->rcvhdrtail_kvaddr)
11647 tail = get_rcvhdrtail(rcd);
11648 else
11649 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11650
11651 return head == tail;
11652}
11653
11654/*
11655 * Context Control and Receive Array encoding for buffer size:
11656 * 0x0 invalid
11657 * 0x1 4 KB
11658 * 0x2 8 KB
11659 * 0x3 16 KB
11660 * 0x4 32 KB
11661 * 0x5 64 KB
11662 * 0x6 128 KB
11663 * 0x7 256 KB
11664 * 0x8 512 KB (Receive Array only)
11665 * 0x9 1 MB (Receive Array only)
11666 * 0xa 2 MB (Receive Array only)
11667 *
11668 * 0xB-0xF - reserved (Receive Array only)
11669 *
11670 *
11671 * This routine assumes that the value has already been sanity checked.
11672 */
11673static u32 encoded_size(u32 size)
11674{
11675 switch (size) {
Jubin John8638b772016-02-14 20:19:24 -080011676 case 4 * 1024: return 0x1;
11677 case 8 * 1024: return 0x2;
11678 case 16 * 1024: return 0x3;
11679 case 32 * 1024: return 0x4;
11680 case 64 * 1024: return 0x5;
11681 case 128 * 1024: return 0x6;
11682 case 256 * 1024: return 0x7;
11683 case 512 * 1024: return 0x8;
11684 case 1 * 1024 * 1024: return 0x9;
11685 case 2 * 1024 * 1024: return 0xa;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011686 }
11687 return 0x1; /* if invalid, go with the minimum size */
11688}
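/*
 * Illustrative only: since every valid size is a power of two, the switch
 * above is equivalent to "ilog2(size) - 11" for sizes in the 4 KB .. 2 MB
 * range (4 KB = 2^12 -> 0x1, ..., 2 MB = 2^21 -> 0xa).  A hedged sketch of
 * that alternative, assuming <linux/log2.h> is available; the driver keeps
 * the explicit switch, which also documents the valid encodings.
 */
static u32 encoded_size_alt(u32 size)
{
	if (size < 4 * 1024 || size > 2 * 1024 * 1024 || !is_power_of_2(size))
		return 0x1;	/* same fallback as encoded_size() */
	return ilog2(size) - 11;
}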
11689
11690void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11691{
11692 struct hfi1_ctxtdata *rcd;
11693 u64 rcvctrl, reg;
11694 int did_enable = 0;
11695
11696 rcd = dd->rcd[ctxt];
11697 if (!rcd)
11698 return;
11699
11700 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11701
11702 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11703 /* if the context already enabled, don't do the extra steps */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011704 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11705 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011706 /* reset the tail and hdr addresses, and sequence count */
11707 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011708 rcd->rcvhdrq_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011709 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11710 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011711 rcd->rcvhdrqtailaddr_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011712 rcd->seq_cnt = 1;
11713
11714 /* reset the cached receive header queue head value */
11715 rcd->head = 0;
11716
11717 /*
11718 * Zero the receive header queue so we don't get false
11719 * positives when checking the sequence number. The
11720 * sequence numbers could land exactly on the same spot.
11721 * E.g. a rcd restart before the receive header wrapped.
11722 */
11723 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11724
11725 /* starting timeout */
11726 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11727
11728 /* enable the context */
11729 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11730
11731 /* clean the egr buffer size first */
11732 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11733 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11734 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11735 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11736
11737 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11738 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11739 did_enable = 1;
11740
11741 /* zero RcvEgrIndexHead */
11742 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11743
11744 /* set eager count and base index */
11745 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11746 & RCV_EGR_CTRL_EGR_CNT_MASK)
11747 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11748 (((rcd->eager_base >> RCV_SHIFT)
11749 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11750 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11751 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11752
11753 /*
11754 * Set TID (expected) count and base index.
11755 * rcd->expected_count is set to individual RcvArray entries,
11756 * not pairs, and the CSR takes a pair-count in groups of
11757 * four, so divide by 8.
11758 */
11759 reg = (((rcd->expected_count >> RCV_SHIFT)
11760 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11761 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11762 (((rcd->expected_base >> RCV_SHIFT)
11763 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11764 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11765 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050011766 if (ctxt == HFI1_CTRL_CTXT)
11767 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011768 }
11769 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11770 write_csr(dd, RCV_VL15, 0);
Mark F. Brown46b010d2015-11-09 19:18:20 -050011771 /*
11772 * When receive context is being disabled turn on tail
11773 * update with a dummy tail address and then disable
11774 * receive context.
11775 */
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011776 if (dd->rcvhdrtail_dummy_dma) {
Mark F. Brown46b010d2015-11-09 19:18:20 -050011777 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011778 dd->rcvhdrtail_dummy_dma);
Mitko Haralanov566c1572016-02-03 14:32:49 -080011779 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011780 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11781 }
11782
Mike Marciniszyn77241052015-07-30 15:17:43 -040011783 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11784 }
11785 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11786 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11787 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11788 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011789 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011790 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
Mitko Haralanov566c1572016-02-03 14:32:49 -080011791 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11792 /* See comment on RcvCtxtCtrl.TailUpd above */
11793 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11794 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11795 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011796 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11797 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11798 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11799 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11800 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
Jubin John4d114fd2016-02-14 20:21:43 -080011801 /*
11802 * In one-packet-per-eager mode, the size comes from
11803 * the RcvArray entry.
11804 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040011805 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11806 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11807 }
11808 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11809 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11810 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11811 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11812 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11813 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11814 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11815 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11816 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11817 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11818 rcd->rcvctrl = rcvctrl;
11819 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11820 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11821
11822 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
Jubin Johnd0d236e2016-02-14 20:20:15 -080011823 if (did_enable &&
11824 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040011825 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11826 if (reg != 0) {
11827 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011828 ctxt, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011829 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11830 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11831 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11832 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11833 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11834 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080011835 ctxt, reg, reg == 0 ? "not" : "still");
Mike Marciniszyn77241052015-07-30 15:17:43 -040011836 }
11837 }
11838
11839 if (did_enable) {
11840 /*
11841 * The interrupt timeout and count must be set after
11842 * the context is enabled to take effect.
11843 */
11844 /* set interrupt timeout */
11845 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
Jubin John17fb4f22016-02-14 20:21:52 -080011846 (u64)rcd->rcvavail_timeout <<
Mike Marciniszyn77241052015-07-30 15:17:43 -040011847 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11848
11849 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11850 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11851 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11852 }
11853
11854 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11855 /*
11856 * If the context has been disabled and the Tail Update has
Mark F. Brown46b010d2015-11-09 19:18:20 -050011857 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
11858 * so it doesn't contain an address that is invalid.
Mike Marciniszyn77241052015-07-30 15:17:43 -040011859 */
Mark F. Brown46b010d2015-11-09 19:18:20 -050011860 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
Tymoteusz Kielan60368182016-09-06 04:35:54 -070011861 dd->rcvhdrtail_dummy_dma);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011862}
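/*
 * Illustrative only: a hedged sketch of how hfi1_rcvctrl() is driven --
 * 'op' is a bitwise OR of HFI1_RCVCTRL_* flags applied to one context.
 * The exact flag combinations used at context setup and teardown live
 * elsewhere in the driver; this pairing is only an example, and the
 * function name is an assumption.
 */
static void example_toggle_rcv_context(struct hfi1_devdata *dd, int ctxt)
{
	/* enable the context and its "receive available" interrupt */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
		     ctxt);

	/* ... receive traffic on the context ... */

	/* disable the interrupt, then the context itself */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_DIS | HFI1_RCVCTRL_CTXT_DIS,
		     ctxt);
}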
11863
Dean Luick582e05c2016-02-18 11:13:01 -080011864u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011865{
11866 int ret;
11867 u64 val = 0;
11868
11869 if (namep) {
11870 ret = dd->cntrnameslen;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011871 *namep = dd->cntrnames;
11872 } else {
11873 const struct cntr_entry *entry;
11874 int i, j;
11875
11876 ret = (dd->ndevcntrs) * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011877
11878 /* Get the start of the block of counters */
11879 *cntrp = dd->cntrs;
11880
11881 /*
11882 * Now go and fill in each counter in the block.
11883 */
11884 for (i = 0; i < DEV_CNTR_LAST; i++) {
11885 entry = &dev_cntrs[i];
11886 hfi1_cdbg(CNTR, "reading %s", entry->name);
11887 if (entry->flags & CNTR_DISABLED) {
11888 /* Nothing */
11889 hfi1_cdbg(CNTR, "\tDisabled\n");
11890 } else {
11891 if (entry->flags & CNTR_VL) {
11892 hfi1_cdbg(CNTR, "\tPer VL\n");
11893 for (j = 0; j < C_VL_COUNT; j++) {
11894 val = entry->rw_cntr(entry,
11895 dd, j,
11896 CNTR_MODE_R,
11897 0);
11898 hfi1_cdbg(
11899 CNTR,
11900 "\t\tRead 0x%llx for %d\n",
11901 val, j);
11902 dd->cntrs[entry->offset + j] =
11903 val;
11904 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050011905 } else if (entry->flags & CNTR_SDMA) {
11906 hfi1_cdbg(CNTR,
11907 "\t Per SDMA Engine\n");
11908 for (j = 0; j < dd->chip_sdma_engines;
11909 j++) {
11910 val =
11911 entry->rw_cntr(entry, dd, j,
11912 CNTR_MODE_R, 0);
11913 hfi1_cdbg(CNTR,
11914 "\t\tRead 0x%llx for %d\n",
11915 val, j);
11916 dd->cntrs[entry->offset + j] =
11917 val;
11918 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040011919 } else {
11920 val = entry->rw_cntr(entry, dd,
11921 CNTR_INVALID_VL,
11922 CNTR_MODE_R, 0);
11923 dd->cntrs[entry->offset] = val;
11924 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11925 }
11926 }
11927 }
11928 }
11929 return ret;
11930}
11931
11932/*
11933 * Used by sysfs to create files for hfi stats to read
11934 */
Dean Luick582e05c2016-02-18 11:13:01 -080011935u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
Mike Marciniszyn77241052015-07-30 15:17:43 -040011936{
11937 int ret;
11938 u64 val = 0;
11939
11940 if (namep) {
Dean Luick582e05c2016-02-18 11:13:01 -080011941 ret = ppd->dd->portcntrnameslen;
11942 *namep = ppd->dd->portcntrnames;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011943 } else {
11944 const struct cntr_entry *entry;
Mike Marciniszyn77241052015-07-30 15:17:43 -040011945 int i, j;
11946
Dean Luick582e05c2016-02-18 11:13:01 -080011947 ret = ppd->dd->nportcntrs * sizeof(u64);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011948 *cntrp = ppd->cntrs;
11949
11950 for (i = 0; i < PORT_CNTR_LAST; i++) {
11951 entry = &port_cntrs[i];
11952 hfi1_cdbg(CNTR, "reading %s", entry->name);
11953 if (entry->flags & CNTR_DISABLED) {
11954 /* Nothing */
11955 hfi1_cdbg(CNTR, "\tDisabled\n");
11956 continue;
11957 }
11958
11959 if (entry->flags & CNTR_VL) {
11960 hfi1_cdbg(CNTR, "\tPer VL");
11961 for (j = 0; j < C_VL_COUNT; j++) {
11962 val = entry->rw_cntr(entry, ppd, j,
11963 CNTR_MODE_R,
11964 0);
11965 hfi1_cdbg(
11966 CNTR,
11967 "\t\tRead 0x%llx for %d",
11968 val, j);
11969 ppd->cntrs[entry->offset + j] = val;
11970 }
11971 } else {
11972 val = entry->rw_cntr(entry, ppd,
11973 CNTR_INVALID_VL,
11974 CNTR_MODE_R,
11975 0);
11976 ppd->cntrs[entry->offset] = val;
11977 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11978 }
11979 }
11980 }
11981 return ret;
11982}
11983
11984static void free_cntrs(struct hfi1_devdata *dd)
11985{
11986 struct hfi1_pportdata *ppd;
11987 int i;
11988
11989 if (dd->synth_stats_timer.data)
11990 del_timer_sync(&dd->synth_stats_timer);
11991 dd->synth_stats_timer.data = 0;
11992 ppd = (struct hfi1_pportdata *)(dd + 1);
11993 for (i = 0; i < dd->num_pports; i++, ppd++) {
11994 kfree(ppd->cntrs);
11995 kfree(ppd->scntrs);
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080011996 free_percpu(ppd->ibport_data.rvp.rc_acks);
11997 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11998 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
Mike Marciniszyn77241052015-07-30 15:17:43 -040011999 ppd->cntrs = NULL;
12000 ppd->scntrs = NULL;
Dennis Dalessandro4eb06882016-01-19 14:42:39 -080012001 ppd->ibport_data.rvp.rc_acks = NULL;
12002 ppd->ibport_data.rvp.rc_qacks = NULL;
12003 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012004 }
12005 kfree(dd->portcntrnames);
12006 dd->portcntrnames = NULL;
12007 kfree(dd->cntrs);
12008 dd->cntrs = NULL;
12009 kfree(dd->scntrs);
12010 dd->scntrs = NULL;
12011 kfree(dd->cntrnames);
12012 dd->cntrnames = NULL;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012013 if (dd->update_cntr_wq) {
12014 destroy_workqueue(dd->update_cntr_wq);
12015 dd->update_cntr_wq = NULL;
12016 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012017}
12018
Mike Marciniszyn77241052015-07-30 15:17:43 -040012019static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12020 u64 *psval, void *context, int vl)
12021{
12022 u64 val;
12023 u64 sval = *psval;
12024
12025 if (entry->flags & CNTR_DISABLED) {
12026 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12027 return 0;
12028 }
12029
12030 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12031
12032 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12033
12034	/* If it's a synthetic counter there is more work we need to do */
12035 if (entry->flags & CNTR_SYNTH) {
12036 if (sval == CNTR_MAX) {
12037 /* No need to read already saturated */
12038 return CNTR_MAX;
12039 }
12040
12041 if (entry->flags & CNTR_32BIT) {
12042 /* 32bit counters can wrap multiple times */
12043 u64 upper = sval >> 32;
12044 u64 lower = (sval << 32) >> 32;
12045
12046 if (lower > val) { /* hw wrapped */
12047 if (upper == CNTR_32BIT_MAX)
12048 val = CNTR_MAX;
12049 else
12050 upper++;
12051 }
12052
12053 if (val != CNTR_MAX)
12054 val = (upper << 32) | val;
12055
12056 } else {
12057 /* If we rolled we are saturated */
12058 if ((val < sval) || (val > CNTR_MAX))
12059 val = CNTR_MAX;
12060 }
12061 }
12062
12063 *psval = val;
12064
12065 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12066
12067 return val;
12068}
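/*
 * Worked example (illustrative only) of the 32-bit wrap handling in
 * read_dev_port_cntr() above: with a saved synthetic value of
 * 0x1_8000_0000 (upper = 0x1, lower = 0x8000_0000) and a new hardware
 * reading of 0x10 (less than lower), the hardware counter has wrapped, so
 * upper is incremented and the returned value becomes 0x2_0000_0010.
 * Once upper reaches CNTR_32BIT_MAX the value saturates at CNTR_MAX.
 */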
12069
12070static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12071 struct cntr_entry *entry,
12072 u64 *psval, void *context, int vl, u64 data)
12073{
12074 u64 val;
12075
12076 if (entry->flags & CNTR_DISABLED) {
12077 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12078 return 0;
12079 }
12080
12081 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12082
12083 if (entry->flags & CNTR_SYNTH) {
12084 *psval = data;
12085 if (entry->flags & CNTR_32BIT) {
12086 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12087 (data << 32) >> 32);
12088 val = data; /* return the full 64bit value */
12089 } else {
12090 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12091 data);
12092 }
12093 } else {
12094 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12095 }
12096
12097 *psval = val;
12098
12099 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12100
12101 return val;
12102}
12103
12104u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12105{
12106 struct cntr_entry *entry;
12107 u64 *sval;
12108
12109 entry = &dev_cntrs[index];
12110 sval = dd->scntrs + entry->offset;
12111
12112 if (vl != CNTR_INVALID_VL)
12113 sval += vl;
12114
12115 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12116}
12117
12118u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12119{
12120 struct cntr_entry *entry;
12121 u64 *sval;
12122
12123 entry = &dev_cntrs[index];
12124 sval = dd->scntrs + entry->offset;
12125
12126 if (vl != CNTR_INVALID_VL)
12127 sval += vl;
12128
12129 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12130}
12131
12132u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12133{
12134 struct cntr_entry *entry;
12135 u64 *sval;
12136
12137 entry = &port_cntrs[index];
12138 sval = ppd->scntrs + entry->offset;
12139
12140 if (vl != CNTR_INVALID_VL)
12141 sval += vl;
12142
12143 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12144 (index <= C_RCV_HDR_OVF_LAST)) {
12145 /* We do not want to bother for disabled contexts */
12146 return 0;
12147 }
12148
12149 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12150}
12151
12152u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12153{
12154 struct cntr_entry *entry;
12155 u64 *sval;
12156
12157 entry = &port_cntrs[index];
12158 sval = ppd->scntrs + entry->offset;
12159
12160 if (vl != CNTR_INVALID_VL)
12161 sval += vl;
12162
12163 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12164 (index <= C_RCV_HDR_OVF_LAST)) {
12165 /* We do not want to bother for disabled contexts */
12166 return 0;
12167 }
12168
12169 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12170}
12171
Tadeusz Struk22546b72017-04-28 10:40:02 -070012172static void do_update_synth_timer(struct work_struct *work)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012173{
12174 u64 cur_tx;
12175 u64 cur_rx;
12176 u64 total_flits;
12177 u8 update = 0;
12178 int i, j, vl;
12179 struct hfi1_pportdata *ppd;
12180 struct cntr_entry *entry;
Tadeusz Struk22546b72017-04-28 10:40:02 -070012181 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12182 update_cntr_work);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012183
12184 /*
12185	 * Rather than keep beating on the CSRs, pick a minimal set that we can
12186	 * check to watch for potential rollover. We can do this by looking at
12187	 * the number of flits sent/received. If the total flits exceed 32 bits'
12188	 * worth, we have to iterate over all the counters and update.
12189 */
12190 entry = &dev_cntrs[C_DC_RCV_FLITS];
12191 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12192
12193 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12194 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12195
12196 hfi1_cdbg(
12197 CNTR,
12198 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12199 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12200
12201 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12202 /*
12203 * May not be strictly necessary to update but it won't hurt and
12204 * simplifies the logic here.
12205 */
12206 update = 1;
12207 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12208 dd->unit);
12209 } else {
12210 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12211 hfi1_cdbg(CNTR,
12212 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12213 total_flits, (u64)CNTR_32BIT_MAX);
12214 if (total_flits >= CNTR_32BIT_MAX) {
12215 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12216 dd->unit);
12217 update = 1;
12218 }
12219 }
12220
12221 if (update) {
12222 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12223 for (i = 0; i < DEV_CNTR_LAST; i++) {
12224 entry = &dev_cntrs[i];
12225 if (entry->flags & CNTR_VL) {
12226 for (vl = 0; vl < C_VL_COUNT; vl++)
12227 read_dev_cntr(dd, i, vl);
12228 } else {
12229 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12230 }
12231 }
12232 ppd = (struct hfi1_pportdata *)(dd + 1);
12233 for (i = 0; i < dd->num_pports; i++, ppd++) {
12234 for (j = 0; j < PORT_CNTR_LAST; j++) {
12235 entry = &port_cntrs[j];
12236 if (entry->flags & CNTR_VL) {
12237 for (vl = 0; vl < C_VL_COUNT; vl++)
12238 read_port_cntr(ppd, j, vl);
12239 } else {
12240 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12241 }
12242 }
12243 }
12244
12245 /*
12246 * We want the value in the register. The goal is to keep track
12247 * of the number of "ticks" not the counter value. In other
12248 * words if the register rolls we want to notice it and go ahead
12249 * and force an update.
12250 */
12251 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12252 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12253 CNTR_MODE_R, 0);
12254
12255 entry = &dev_cntrs[C_DC_RCV_FLITS];
12256 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12257 CNTR_MODE_R, 0);
12258
12259 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12260 dd->unit, dd->last_tx, dd->last_rx);
12261
12262 } else {
12263 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12264 }
Tadeusz Struk22546b72017-04-28 10:40:02 -070012265}
Mike Marciniszyn77241052015-07-30 15:17:43 -040012266
Tadeusz Struk22546b72017-04-28 10:40:02 -070012267static void update_synth_timer(unsigned long opaque)
12268{
12269 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12270
12271 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
Bart Van Assche48a0cc132016-06-03 12:09:56 -070012272 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012273}
12274
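/*
 * Illustrative sketch (not part of the driver): do_update_synth_timer()
 * above only walks every device and port counter when a cheap "tripwire"
 * check trips.  This hypothetical helper restates that decision: a full
 * update is needed if either flit counter went backwards (it rolled) or
 * the combined delta since the last pass has reached 32 bits' worth.
 */
static inline bool example_synth_update_needed(u64 cur_tx, u64 last_tx,
					       u64 cur_rx, u64 last_rx)
{
	if (cur_tx < last_tx || cur_rx < last_rx)
		return true;	/* a tripwire counter rolled */

	return (cur_tx - last_tx) + (cur_rx - last_rx) >= CNTR_32BIT_MAX;
}
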
Jianxin Xiong09a79082016-10-25 13:12:40 -070012275#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012276static int init_cntrs(struct hfi1_devdata *dd)
12277{
Dean Luickc024c552016-01-11 18:30:57 -050012278 int i, rcv_ctxts, j;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012279 size_t sz;
12280 char *p;
12281 char name[C_MAX_NAME];
12282 struct hfi1_pportdata *ppd;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012283 const char *bit_type_32 = ",32";
12284 const int bit_type_32_sz = strlen(bit_type_32);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012285
12286 /* set up the stats timer; the add_timer is done at the end */
Muhammad Falak R Wani24523a92015-10-25 16:13:23 +053012287 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12288 (unsigned long)dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012289
12290 /***********************/
12291 /* per device counters */
12292 /***********************/
12293
12294	/* size names and determine how many we have */
12295 dd->ndevcntrs = 0;
12296 sz = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012297
12298 for (i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012299 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12300 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12301 continue;
12302 }
12303
12304 if (dev_cntrs[i].flags & CNTR_VL) {
Dean Luickc024c552016-01-11 18:30:57 -050012305 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012306 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012307 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012308 dev_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012309 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012310 /* Add ",32" for 32-bit counters */
12311 if (dev_cntrs[i].flags & CNTR_32BIT)
12312 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012313 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012314 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012315 }
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012316 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
Dean Luickc024c552016-01-11 18:30:57 -050012317 dev_cntrs[i].offset = dd->ndevcntrs;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012318 for (j = 0; j < dd->chip_sdma_engines; j++) {
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012319 snprintf(name, C_MAX_NAME, "%s%d",
12320 dev_cntrs[i].name, j);
12321 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012322 /* Add ",32" for 32-bit counters */
12323 if (dev_cntrs[i].flags & CNTR_32BIT)
12324 sz += bit_type_32_sz;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012325 sz++;
Vennila Megavannana699c6c2016-01-11 18:30:56 -050012326 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012327 }
12328 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012329 /* +1 for newline. */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012330 sz += strlen(dev_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012331 /* Add ",32" for 32-bit counters */
12332 if (dev_cntrs[i].flags & CNTR_32BIT)
12333 sz += bit_type_32_sz;
Dean Luickc024c552016-01-11 18:30:57 -050012334 dev_cntrs[i].offset = dd->ndevcntrs;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012335 dd->ndevcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012336 }
12337 }
12338
12339 /* allocate space for the counter values */
Dean Luickc024c552016-01-11 18:30:57 -050012340 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012341 if (!dd->cntrs)
12342 goto bail;
12343
Dean Luickc024c552016-01-11 18:30:57 -050012344 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012345 if (!dd->scntrs)
12346 goto bail;
12347
Mike Marciniszyn77241052015-07-30 15:17:43 -040012348 /* allocate space for the counter names */
12349 dd->cntrnameslen = sz;
12350 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12351 if (!dd->cntrnames)
12352 goto bail;
12353
12354 /* fill in the names */
Dean Luickc024c552016-01-11 18:30:57 -050012355 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012356 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12357 /* Nothing */
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012358 } else if (dev_cntrs[i].flags & CNTR_VL) {
12359 for (j = 0; j < C_VL_COUNT; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012360 snprintf(name, C_MAX_NAME, "%s%d",
12361 dev_cntrs[i].name,
12362 vl_from_idx(j));
12363 memcpy(p, name, strlen(name));
12364 p += strlen(name);
12365
12366 /* Counter is 32 bits */
12367 if (dev_cntrs[i].flags & CNTR_32BIT) {
12368 memcpy(p, bit_type_32, bit_type_32_sz);
12369 p += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012370 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012371
Mike Marciniszyn77241052015-07-30 15:17:43 -040012372 *p++ = '\n';
12373 }
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012374 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12375 for (j = 0; j < dd->chip_sdma_engines; j++) {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012376 snprintf(name, C_MAX_NAME, "%s%d",
12377 dev_cntrs[i].name, j);
12378 memcpy(p, name, strlen(name));
12379 p += strlen(name);
12380
12381 /* Counter is 32 bits */
12382 if (dev_cntrs[i].flags & CNTR_32BIT) {
12383 memcpy(p, bit_type_32, bit_type_32_sz);
12384 p += bit_type_32_sz;
12385 }
12386
12387 *p++ = '\n';
12388 }
12389 } else {
12390 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12391 p += strlen(dev_cntrs[i].name);
12392
12393 /* Counter is 32 bits */
12394 if (dev_cntrs[i].flags & CNTR_32BIT) {
12395 memcpy(p, bit_type_32, bit_type_32_sz);
12396 p += bit_type_32_sz;
12397 }
12398
12399 *p++ = '\n';
Mike Marciniszyn77241052015-07-30 15:17:43 -040012400 }
12401 }
12402
12403 /*********************/
12404 /* per port counters */
12405 /*********************/
12406
12407 /*
12408 * Go through the counters for the overflows and disable the ones we
12409 * don't need. This varies based on platform so we need to do it
12410 * dynamically here.
12411 */
12412 rcv_ctxts = dd->num_rcv_contexts;
12413 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12414 i <= C_RCV_HDR_OVF_LAST; i++) {
12415 port_cntrs[i].flags |= CNTR_DISABLED;
12416 }
12417
12418	/* size port counter names and determine how many we have */
12419 sz = 0;
12420 dd->nportcntrs = 0;
12421 for (i = 0; i < PORT_CNTR_LAST; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012422 if (port_cntrs[i].flags & CNTR_DISABLED) {
12423 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12424 continue;
12425 }
12426
12427 if (port_cntrs[i].flags & CNTR_VL) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012428 port_cntrs[i].offset = dd->nportcntrs;
12429 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012430 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012431 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012432 sz += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012433 /* Add ",32" for 32-bit counters */
12434 if (port_cntrs[i].flags & CNTR_32BIT)
12435 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012436 sz++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012437 dd->nportcntrs++;
12438 }
12439 } else {
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012440 /* +1 for newline */
Mike Marciniszyn77241052015-07-30 15:17:43 -040012441 sz += strlen(port_cntrs[i].name) + 1;
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012442 /* Add ",32" for 32-bit counters */
12443 if (port_cntrs[i].flags & CNTR_32BIT)
12444 sz += bit_type_32_sz;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012445 port_cntrs[i].offset = dd->nportcntrs;
12446 dd->nportcntrs++;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012447 }
12448 }
12449
12450 /* allocate space for the counter names */
12451 dd->portcntrnameslen = sz;
12452 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12453 if (!dd->portcntrnames)
12454 goto bail;
12455
12456 /* fill in port cntr names */
12457 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12458 if (port_cntrs[i].flags & CNTR_DISABLED)
12459 continue;
12460
12461 if (port_cntrs[i].flags & CNTR_VL) {
12462 for (j = 0; j < C_VL_COUNT; j++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012463 snprintf(name, C_MAX_NAME, "%s%d",
Jubin John17fb4f22016-02-14 20:21:52 -080012464 port_cntrs[i].name, vl_from_idx(j));
Mike Marciniszyn77241052015-07-30 15:17:43 -040012465 memcpy(p, name, strlen(name));
12466 p += strlen(name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012467
12468 /* Counter is 32 bits */
12469 if (port_cntrs[i].flags & CNTR_32BIT) {
12470 memcpy(p, bit_type_32, bit_type_32_sz);
12471 p += bit_type_32_sz;
12472 }
12473
Mike Marciniszyn77241052015-07-30 15:17:43 -040012474 *p++ = '\n';
12475 }
12476 } else {
12477 memcpy(p, port_cntrs[i].name,
12478 strlen(port_cntrs[i].name));
12479 p += strlen(port_cntrs[i].name);
Sebastian Sanchez11d2b112016-02-03 14:32:40 -080012480
12481 /* Counter is 32 bits */
12482 if (port_cntrs[i].flags & CNTR_32BIT) {
12483 memcpy(p, bit_type_32, bit_type_32_sz);
12484 p += bit_type_32_sz;
12485 }
12486
Mike Marciniszyn77241052015-07-30 15:17:43 -040012487 *p++ = '\n';
12488 }
12489 }
12490
12491 /* allocate per port storage for counter values */
12492 ppd = (struct hfi1_pportdata *)(dd + 1);
12493 for (i = 0; i < dd->num_pports; i++, ppd++) {
12494 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12495 if (!ppd->cntrs)
12496 goto bail;
12497
12498 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12499 if (!ppd->scntrs)
12500 goto bail;
12501 }
12502
12503 /* CPU counters need to be allocated and zeroed */
12504 if (init_cpu_counters(dd))
12505 goto bail;
12506
Tadeusz Struk22546b72017-04-28 10:40:02 -070012507 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12508 WQ_MEM_RECLAIM, dd->unit);
12509 if (!dd->update_cntr_wq)
12510 goto bail;
12511
12512 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12513
Mike Marciniszyn77241052015-07-30 15:17:43 -040012514 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12515 return 0;
12516bail:
12517 free_cntrs(dd);
12518 return -ENOMEM;
12519}
12520
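/*
 * Illustrative sketch (not part of the driver): init_cntrs() above sizes
 * the flat, newline-separated name buffers before filling them.  This
 * hypothetical helper shows the per-entry accounting for a plain (non-VL,
 * non-SDMA) counter: the name itself, an optional ",32" suffix marking a
 * 32-bit counter, and one byte for the trailing '\n'.
 */
static inline size_t example_cntr_name_bytes(const char *name, bool is_32bit)
{
	size_t sz = strlen(name) + 1;	/* name + '\n' */

	if (is_32bit)
		sz += strlen(",32");	/* ",32" flags a 32-bit counter */
	return sz;
}
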
Mike Marciniszyn77241052015-07-30 15:17:43 -040012521static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12522{
12523 switch (chip_lstate) {
12524 default:
12525 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012526 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12527 chip_lstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012528 /* fall through */
12529 case LSTATE_DOWN:
12530 return IB_PORT_DOWN;
12531 case LSTATE_INIT:
12532 return IB_PORT_INIT;
12533 case LSTATE_ARMED:
12534 return IB_PORT_ARMED;
12535 case LSTATE_ACTIVE:
12536 return IB_PORT_ACTIVE;
12537 }
12538}
12539
12540u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12541{
12542 /* look at the HFI meta-states only */
12543 switch (chip_pstate & 0xf0) {
12544 default:
12545 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012546 chip_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012547 /* fall through */
12548 case PLS_DISABLED:
12549 return IB_PORTPHYSSTATE_DISABLED;
12550 case PLS_OFFLINE:
12551 return OPA_PORTPHYSSTATE_OFFLINE;
12552 case PLS_POLLING:
12553 return IB_PORTPHYSSTATE_POLLING;
12554 case PLS_CONFIGPHY:
12555 return IB_PORTPHYSSTATE_TRAINING;
12556 case PLS_LINKUP:
12557 return IB_PORTPHYSSTATE_LINKUP;
12558 case PLS_PHYTEST:
12559 return IB_PORTPHYSSTATE_PHY_TEST;
12560 }
12561}
12562
12563/* return the OPA port logical state name */
12564const char *opa_lstate_name(u32 lstate)
12565{
12566 static const char * const port_logical_names[] = {
12567 "PORT_NOP",
12568 "PORT_DOWN",
12569 "PORT_INIT",
12570 "PORT_ARMED",
12571 "PORT_ACTIVE",
12572 "PORT_ACTIVE_DEFER",
12573 };
12574 if (lstate < ARRAY_SIZE(port_logical_names))
12575 return port_logical_names[lstate];
12576 return "unknown";
12577}
12578
12579/* return the OPA port physical state name */
12580const char *opa_pstate_name(u32 pstate)
12581{
12582 static const char * const port_physical_names[] = {
12583 "PHYS_NOP",
12584 "reserved1",
12585 "PHYS_POLL",
12586 "PHYS_DISABLED",
12587 "PHYS_TRAINING",
12588 "PHYS_LINKUP",
12589 "PHYS_LINK_ERR_RECOVER",
12590 "PHYS_PHY_TEST",
12591 "reserved8",
12592 "PHYS_OFFLINE",
12593 "PHYS_GANGED",
12594 "PHYS_TEST",
12595 };
12596 if (pstate < ARRAY_SIZE(port_physical_names))
12597 return port_physical_names[pstate];
12598 return "unknown";
12599}
12600
12601/*
12602 * Read the hardware link state and set the driver's cached value of it.
12603 * Return the (new) current value.
12604 */
12605u32 get_logical_state(struct hfi1_pportdata *ppd)
12606{
12607 u32 new_state;
12608
12609 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12610 if (new_state != ppd->lstate) {
12611 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012612 opa_lstate_name(new_state), new_state);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012613 ppd->lstate = new_state;
12614 }
12615 /*
12616 * Set port status flags in the page mapped into userspace
12617 * memory. Do it here to ensure a reliable state - this is
12618 * the only function called by all state handling code.
12619	 * Always set the flags because the cached value might have
12620	 * been changed explicitly outside of this function.
12622 */
12623 if (ppd->statusp) {
12624 switch (ppd->lstate) {
12625 case IB_PORT_DOWN:
12626 case IB_PORT_INIT:
12627 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12628 HFI1_STATUS_IB_READY);
12629 break;
12630 case IB_PORT_ARMED:
12631 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12632 break;
12633 case IB_PORT_ACTIVE:
12634 *ppd->statusp |= HFI1_STATUS_IB_READY;
12635 break;
12636 }
12637 }
12638 return ppd->lstate;
12639}
12640
12641/**
12642 * wait_logical_linkstate - wait for an IB link state change to occur
12643 * @ppd: port device
12644 * @state: the state to wait for
12645 * @msecs: the number of milliseconds to wait
12646 *
12647 * Wait up to msecs milliseconds for IB link state change to occur.
12648 * For now, take the easy polling route.
12649 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12650 */
12651static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12652 int msecs)
12653{
12654 unsigned long timeout;
12655
12656 timeout = jiffies + msecs_to_jiffies(msecs);
12657 while (1) {
12658 if (get_logical_state(ppd) == state)
12659 return 0;
12660 if (time_after(jiffies, timeout))
12661 break;
12662 msleep(20);
12663 }
12664 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12665
12666 return -ETIMEDOUT;
12667}
12668
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012669/*
12670 * Read the physical hardware link state and set the driver's cached value
12671 * of it.
12672 */
12673void cache_physical_state(struct hfi1_pportdata *ppd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012674{
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012675 u32 read_pstate;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012676 u32 ib_pstate;
12677
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012678 read_pstate = read_physical_state(ppd->dd);
12679 ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate);
12680 /* check if OPA pstate changed */
12681 if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012682 dd_dev_info(ppd->dd,
Jubin John17fb4f22016-02-14 20:21:52 -080012683 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12684 __func__, opa_pstate_name(ib_pstate), ib_pstate,
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012685 read_pstate);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012686 }
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070012687 ppd->pstate = read_pstate;
12688}
12689
12690/*
12691 * wait_physical_linkstate - wait for a physical link state change to occur
12692 * @ppd: port device
12693 * @state: the state to wait for
12694 * @msecs: the number of milliseconds to wait
12695 *
12696 * Wait up to msecs milliseconds for physical link state change to occur.
12697 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12698 */
12699static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12700 int msecs)
12701{
12702 unsigned long timeout;
12703
12704 timeout = jiffies + msecs_to_jiffies(msecs);
12705 while (1) {
12706 cache_physical_state(ppd);
12707 if (ppd->pstate == state)
12708 break;
12709 if (time_after(jiffies, timeout)) {
12710 dd_dev_err(ppd->dd,
12711 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
12712 state, ppd->pstate);
12713 return -ETIMEDOUT;
12714 }
12715 usleep_range(1950, 2050); /* sleep 2ms-ish */
12716 }
12717
12718 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012719}
12720
Mike Marciniszyn77241052015-07-30 15:17:43 -040012721#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12722(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12723
12724#define SET_STATIC_RATE_CONTROL_SMASK(r) \
12725(r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12726
Michael J. Ruhl9b60d2c2017-05-04 05:15:09 -070012727void hfi1_init_ctxt(struct send_context *sc)
Mike Marciniszyn77241052015-07-30 15:17:43 -040012728{
Jubin Johnd125a6c2016-02-14 20:19:49 -080012729 if (sc) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012730 struct hfi1_devdata *dd = sc->dd;
12731 u64 reg;
12732 u8 set = (sc->type == SC_USER ?
12733 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12734 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12735 reg = read_kctxt_csr(dd, sc->hw_context,
12736 SEND_CTXT_CHECK_ENABLE);
12737 if (set)
12738 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12739 else
12740 SET_STATIC_RATE_CONTROL_SMASK(reg);
12741 write_kctxt_csr(dd, sc->hw_context,
12742 SEND_CTXT_CHECK_ENABLE, reg);
12743 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012744}
12745
12746int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12747{
12748 int ret = 0;
12749 u64 reg;
12750
12751 if (dd->icode != ICODE_RTL_SILICON) {
12752 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12753 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12754 __func__);
12755 return -EINVAL;
12756 }
12757 reg = read_csr(dd, ASIC_STS_THERM);
12758 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12759 ASIC_STS_THERM_CURR_TEMP_MASK);
12760 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12761 ASIC_STS_THERM_LO_TEMP_MASK);
12762 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12763 ASIC_STS_THERM_HI_TEMP_MASK);
12764 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12765 ASIC_STS_THERM_CRIT_TEMP_MASK);
12766 /* triggers is a 3-bit value - 1 bit per trigger. */
12767 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12768
12769 return ret;
12770}
12771
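/*
 * Illustrative sketch (not part of the driver): hfi1_tempsense_rd() above
 * pulls several packed fields out of a single ASIC_STS_THERM read with
 * shift-and-mask pairs.  This hypothetical helper shows the generic
 * pattern for one field; the real shift and mask values come from the CSR
 * definitions.
 */
static inline u64 example_extract_field(u64 reg, unsigned int shift, u64 mask)
{
	return (reg >> shift) & mask;	/* isolate one packed field */
}
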
12772/* ========================================================================= */
12773
12774/*
12775 * Enable/disable chip from delivering interrupts.
12776 */
12777void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12778{
12779 int i;
12780
12781 /*
12782 * In HFI, the mask needs to be 1 to allow interrupts.
12783 */
12784 if (enable) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040012785 /* enable all interrupts */
12786 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012787 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012788
Easwar Hariharan8ebd4cf2016-02-03 14:31:14 -080012789 init_qsfp_int(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012790 } else {
12791 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012792 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012793 }
12794}
12795
12796/*
12797 * Clear all interrupt sources on the chip.
12798 */
12799static void clear_all_interrupts(struct hfi1_devdata *dd)
12800{
12801 int i;
12802
12803 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080012804 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012805
12806 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12807 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12808 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12809 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12810 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12811 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12812 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12813 for (i = 0; i < dd->chip_send_contexts; i++)
12814 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12815 for (i = 0; i < dd->chip_sdma_engines; i++)
12816 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12817
12818 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12819 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12820 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12821}
12822
12823/* Move to pcie.c? */
12824static void disable_intx(struct pci_dev *pdev)
12825{
12826 pci_intx(pdev, 0);
12827}
12828
12829static void clean_up_interrupts(struct hfi1_devdata *dd)
12830{
12831 int i;
12832
12833 /* remove irqs - must happen before disabling/turning off */
12834 if (dd->num_msix_entries) {
12835 /* MSI-X */
12836 struct hfi1_msix_entry *me = dd->msix_entries;
12837
12838 for (i = 0; i < dd->num_msix_entries; i++, me++) {
Jubin Johnd125a6c2016-02-14 20:19:49 -080012839 if (!me->arg) /* => no irq, no affinity */
Mitko Haralanov957558c2016-02-03 14:33:40 -080012840 continue;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012841 hfi1_put_irq_affinity(dd, me);
12842 free_irq(me->irq, me->arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012843 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012844
12845 /* clean structures */
12846 kfree(dd->msix_entries);
12847 dd->msix_entries = NULL;
12848 dd->num_msix_entries = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012849 } else {
12850 /* INTx */
12851 if (dd->requested_intx_irq) {
12852 free_irq(dd->pcidev->irq, dd);
12853 dd->requested_intx_irq = 0;
12854 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012855 disable_intx(dd->pcidev);
12856 }
12857
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070012858 pci_free_irq_vectors(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012859}
12860
12861/*
12862 * Remap the interrupt source from the general handler to the given MSI-X
12863 * interrupt.
12864 */
12865static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12866{
12867 u64 reg;
12868 int m, n;
12869
12870 /* clear from the handled mask of the general interrupt */
12871 m = isrc / 64;
12872 n = isrc % 64;
Dennis Dalessandrobc54f672017-05-29 17:18:14 -070012873 if (likely(m < CCE_NUM_INT_CSRS)) {
12874 dd->gi_mask[m] &= ~((u64)1 << n);
12875 } else {
12876 dd_dev_err(dd, "remap interrupt err\n");
12877 return;
12878 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012879
12880 /* direct the chip source to the given MSI-X interrupt */
12881 m = isrc / 8;
12882 n = isrc % 8;
Jubin John8638b772016-02-14 20:19:24 -080012883 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12884 reg &= ~((u64)0xff << (8 * n));
12885 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12886 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012887}
12888
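/*
 * Illustrative sketch (not part of the driver): each CCE_INT_MAP CSR holds
 * eight 8-bit MSI-X vector numbers, one per interrupt source, so remap_intr()
 * above performs a read-modify-write of byte (isrc % 8) within CSR
 * (isrc / 8).  This hypothetical helper shows that byte update on a plain
 * 64-bit value.
 */
static inline u64 example_set_map_byte(u64 map, int isrc, u8 msix_intr)
{
	int n = isrc % 8;			/* byte lane within the CSR */

	map &= ~((u64)0xff << (8 * n));		/* clear the old vector */
	map |= ((u64)msix_intr) << (8 * n);	/* install the new vector */
	return map;
}
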
12889static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12890 int engine, int msix_intr)
12891{
12892 /*
12893 * SDMA engine interrupt sources grouped by type, rather than
12894 * engine. Per-engine interrupts are as follows:
12895 * SDMA
12896 * SDMAProgress
12897 * SDMAIdle
12898 */
Jubin John8638b772016-02-14 20:19:24 -080012899 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012900 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012901 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012902 msix_intr);
Jubin John8638b772016-02-14 20:19:24 -080012903 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
Jubin John17fb4f22016-02-14 20:21:52 -080012904 msix_intr);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012905}
12906
Mike Marciniszyn77241052015-07-30 15:17:43 -040012907static int request_intx_irq(struct hfi1_devdata *dd)
12908{
12909 int ret;
12910
Jubin John98050712015-11-16 21:59:27 -050012911 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12912 dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012913 ret = request_irq(dd->pcidev->irq, general_interrupt,
Jubin John17fb4f22016-02-14 20:21:52 -080012914 IRQF_SHARED, dd->intx_name, dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012915 if (ret)
12916 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
Jubin John17fb4f22016-02-14 20:21:52 -080012917 ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012918 else
12919 dd->requested_intx_irq = 1;
12920 return ret;
12921}
12922
12923static int request_msix_irqs(struct hfi1_devdata *dd)
12924{
Mike Marciniszyn77241052015-07-30 15:17:43 -040012925 int first_general, last_general;
12926 int first_sdma, last_sdma;
12927 int first_rx, last_rx;
Mitko Haralanov957558c2016-02-03 14:33:40 -080012928 int i, ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012929
12930 /* calculate the ranges we are going to use */
12931 first_general = 0;
Jubin Johnf3ff8182016-02-14 20:20:50 -080012932 last_general = first_general + 1;
12933 first_sdma = last_general;
12934 last_sdma = first_sdma + dd->num_sdma;
12935 first_rx = last_sdma;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012936 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
12937
12938 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
12939 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012940
12941 /*
Mike Marciniszyn77241052015-07-30 15:17:43 -040012942 * Sanity check - the code expects all SDMA chip source
12943 * interrupts to be in the same CSR, starting at bit 0. Verify
12944 * that this is true by checking the bit location of the start.
12945 */
12946 BUILD_BUG_ON(IS_SDMA_START % 64);
12947
12948 for (i = 0; i < dd->num_msix_entries; i++) {
12949 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12950 const char *err_info;
12951 irq_handler_t handler;
Dean Luickf4f30031c2015-10-26 10:28:44 -040012952 irq_handler_t thread = NULL;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012953 void *arg = NULL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012954 int idx;
12955 struct hfi1_ctxtdata *rcd = NULL;
12956 struct sdma_engine *sde = NULL;
12957
12958 /* obtain the arguments to request_irq */
12959 if (first_general <= i && i < last_general) {
12960 idx = i - first_general;
12961 handler = general_interrupt;
12962 arg = dd;
12963 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012964 DRIVER_NAME "_%d", dd->unit);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012965 err_info = "general";
Mitko Haralanov957558c2016-02-03 14:33:40 -080012966 me->type = IRQ_GENERAL;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012967 } else if (first_sdma <= i && i < last_sdma) {
12968 idx = i - first_sdma;
12969 sde = &dd->per_sdma[idx];
12970 handler = sdma_interrupt;
12971 arg = sde;
12972 snprintf(me->name, sizeof(me->name),
Jubin John98050712015-11-16 21:59:27 -050012973 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
Mike Marciniszyn77241052015-07-30 15:17:43 -040012974 err_info = "sdma";
12975 remap_sdma_interrupts(dd, idx, i);
Mitko Haralanov957558c2016-02-03 14:33:40 -080012976 me->type = IRQ_SDMA;
Mike Marciniszyn77241052015-07-30 15:17:43 -040012977 } else if (first_rx <= i && i < last_rx) {
12978 idx = i - first_rx;
12979 rcd = dd->rcd[idx];
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070012980 if (rcd) {
12981 /*
12982 * Set the interrupt register and mask for this
12983 * context's interrupt.
12984 */
12985 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12986 rcd->imask = ((u64)1) <<
12987 ((IS_RCVAVAIL_START + idx) % 64);
12988 handler = receive_context_interrupt;
12989 thread = receive_context_thread;
12990 arg = rcd;
12991 snprintf(me->name, sizeof(me->name),
12992 DRIVER_NAME "_%d kctxt%d",
12993 dd->unit, idx);
12994 err_info = "receive context";
12995 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12996 me->type = IRQ_RCVCTXT;
12997 rcd->msix_intr = i;
12998 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040012999 } else {
13000 /* not in our expected range - complain, then
Jubin John4d114fd2016-02-14 20:21:43 -080013001 * ignore it
13002 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013003 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013004 "Unexpected extra MSI-X interrupt %d\n", i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013005 continue;
13006 }
13007 /* no argument, no interrupt */
Jubin Johnd125a6c2016-02-14 20:19:49 -080013008 if (!arg)
Mike Marciniszyn77241052015-07-30 15:17:43 -040013009 continue;
13010 /* make sure the name is terminated */
Jubin John8638b772016-02-14 20:19:24 -080013011 me->name[sizeof(me->name) - 1] = 0;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013012 me->irq = pci_irq_vector(dd->pcidev, i);
13013 /*
13014 * On err return me->irq. Don't need to clear this
13015 * because 'arg' has not been set, and cleanup will
13016 * do the right thing.
13017 */
13018 if (me->irq < 0)
13019 return me->irq;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013020
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013021 ret = request_threaded_irq(me->irq, handler, thread, 0,
Jubin John17fb4f22016-02-14 20:21:52 -080013022 me->name, arg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013023 if (ret) {
13024 dd_dev_err(dd,
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013025 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13026 err_info, me->irq, idx, ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013027 return ret;
13028 }
13029 /*
13030 * assign arg after request_irq call, so it will be
13031 * cleaned up
13032 */
13033 me->arg = arg;
13034
Mitko Haralanov957558c2016-02-03 14:33:40 -080013035 ret = hfi1_get_irq_affinity(dd, me);
13036 if (ret)
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013037 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013038 }
13039
Mike Marciniszyn77241052015-07-30 15:17:43 -040013040 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013041}
13042
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013043void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13044{
13045 int i;
13046
13047 if (!dd->num_msix_entries) {
13048 synchronize_irq(dd->pcidev->irq);
13049 return;
13050 }
13051
13052 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13053 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13054 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13055
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013056 synchronize_irq(me->irq);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013057 }
13058}
13059
13060void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13061{
13062 struct hfi1_devdata *dd = rcd->dd;
13063 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13064
13065 if (!me->arg) /* => no irq, no affinity */
13066 return;
13067
13068 hfi1_put_irq_affinity(dd, me);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013069 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013070
13071 me->arg = NULL;
13072}
13073
13074void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13075{
13076 struct hfi1_devdata *dd = rcd->dd;
13077 struct hfi1_msix_entry *me;
13078 int idx = rcd->ctxt;
13079 void *arg = rcd;
13080 int ret;
13081
13082 rcd->msix_intr = dd->vnic.msix_idx++;
13083 me = &dd->msix_entries[rcd->msix_intr];
13084
13085 /*
13086 * Set the interrupt register and mask for this
13087 * context's interrupt.
13088 */
13089 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13090 rcd->imask = ((u64)1) <<
13091 ((IS_RCVAVAIL_START + idx) % 64);
13092
13093 snprintf(me->name, sizeof(me->name),
13094 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13095 me->name[sizeof(me->name) - 1] = 0;
13096 me->type = IRQ_RCVCTXT;
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013097 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13098 if (me->irq < 0) {
13099 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13100 idx, me->irq);
13101 return;
13102 }
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013103 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13104
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013105 ret = request_threaded_irq(me->irq, receive_context_interrupt,
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013106 receive_context_thread, 0, me->name, arg);
13107 if (ret) {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013108 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13109 me->irq, idx, ret);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013110 return;
13111 }
13112 /*
13113 * assign arg after request_irq call, so it will be
13114 * cleaned up
13115 */
13116 me->arg = arg;
13117
13118 ret = hfi1_get_irq_affinity(dd, me);
13119 if (ret) {
13120 dd_dev_err(dd,
13121 "unable to pin IRQ %d\n", ret);
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013122 free_irq(me->irq, me->arg);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013123 }
13124}
13125
Mike Marciniszyn77241052015-07-30 15:17:43 -040013126/*
13127 * Set the general handler to accept all interrupts, remap all
13128 * chip interrupts back to MSI-X 0.
13129 */
13130static void reset_interrupts(struct hfi1_devdata *dd)
13131{
13132 int i;
13133
13134 /* all interrupts handled by the general handler */
13135 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13136 dd->gi_mask[i] = ~(u64)0;
13137
13138 /* all chip interrupts map to MSI-X 0 */
13139 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013140 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013141}
13142
13143static int set_up_interrupts(struct hfi1_devdata *dd)
13144{
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013145 u32 total;
13146 int ret, request;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013147 int single_interrupt = 0; /* we expect to have all the interrupts */
13148
13149 /*
13150 * Interrupt count:
13151 * 1 general, "slow path" interrupt (includes the SDMA engines
13152 * slow source, SDMACleanupDone)
13153 * N interrupts - one per used SDMA engine
13154	 * M interrupts - one per kernel receive context
13155 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013156 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013157
Mike Marciniszyn77241052015-07-30 15:17:43 -040013158 /* ask for MSI-X interrupts */
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013159 request = request_msix(dd, total);
13160 if (request < 0) {
13161 ret = request;
13162 goto fail;
13163 } else if (request == 0) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013164 /* using INTx */
13165 /* dd->num_msix_entries already zero */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013166 single_interrupt = 1;
13167 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013168 } else if (request < total) {
13169 /* using MSI-X, with reduced interrupts */
13170 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13171 total, request);
13172 ret = -EINVAL;
13173 goto fail;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013174 } else {
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013175 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13176 GFP_KERNEL);
13177 if (!dd->msix_entries) {
13178 ret = -ENOMEM;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013179 goto fail;
13180 }
Michael J. Ruhlbb7dde82017-05-26 05:35:31 -070013181 /* using MSI-X */
13182 dd->num_msix_entries = total;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013183 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13184 }
13185
13186 /* mask all interrupts */
13187 set_intr_state(dd, 0);
13188 /* clear all pending interrupts */
13189 clear_all_interrupts(dd);
13190
13191 /* reset general handler mask, chip MSI-X mappings */
13192 reset_interrupts(dd);
13193
13194 if (single_interrupt)
13195 ret = request_intx_irq(dd);
13196 else
13197 ret = request_msix_irqs(dd);
13198 if (ret)
13199 goto fail;
13200
13201 return 0;
13202
13203fail:
13204 clean_up_interrupts(dd);
13205 return ret;
13206}
13207
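/*
 * Illustrative sketch (not part of the driver): the vector budget requested
 * in set_up_interrupts() above is one "slow path" general interrupt, one per
 * used SDMA engine, one per kernel receive context, plus a fixed reserve for
 * VNIC contexts.  This hypothetical helper restates that sum.
 */
static inline u32 example_msix_budget(u32 num_sdma, u32 n_krcv_queues)
{
	return 1 + num_sdma + n_krcv_queues + HFI1_NUM_VNIC_CTXT;
}
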
13208/*
13209 * Set up context values in dd. Sets:
13210 *
13211 * num_rcv_contexts - number of contexts being used
13212 * n_krcv_queues - number of kernel contexts
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013213 * first_dyn_alloc_ctxt - first dynamically allocated context
13214 * in array of contexts
Mike Marciniszyn77241052015-07-30 15:17:43 -040013215 * freectxts - number of free user contexts
13216 * num_send_contexts - number of PIO send contexts being used
13217 */
13218static int set_up_context_variables(struct hfi1_devdata *dd)
13219{
Harish Chegondi429b6a72016-08-31 07:24:40 -070013220 unsigned long num_kernel_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013221 int total_contexts;
13222 int ret;
13223 unsigned ngroups;
Dean Luick8f000f72016-04-12 11:32:06 -070013224 int qos_rmt_count;
13225 int user_rmt_reduced;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013226
13227 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013228 * Kernel receive contexts:
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013229 * - Context 0 - control context (VL15/multicast/error)
Dean Luick33a9eb52016-04-12 10:50:22 -070013230 * - Context 1 - first kernel context
13231 * - Context 2 - second kernel context
13232 * ...
Mike Marciniszyn77241052015-07-30 15:17:43 -040013233 */
13234 if (n_krcvqs)
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013235 /*
Dean Luick33a9eb52016-04-12 10:50:22 -070013236 * n_krcvqs is the sum of module parameter kernel receive
13237 * contexts, krcvqs[]. It does not include the control
13238 * context, so add that.
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050013239 */
Dean Luick33a9eb52016-04-12 10:50:22 -070013240 num_kernel_contexts = n_krcvqs + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013241 else
Harish Chegondi8784ac02016-07-25 13:38:50 -070013242 num_kernel_contexts = DEFAULT_KRCVQS + 1;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013243 /*
13244 * Every kernel receive context needs an ACK send context.
13245	 * One send context is allocated for each VL{0-7} and VL15.
13246 */
13247 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13248 dd_dev_err(dd,
Harish Chegondi429b6a72016-08-31 07:24:40 -070013249 "Reducing # kernel rcv contexts to: %d, from %lu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013250 (int)(dd->chip_send_contexts - num_vls - 1),
Harish Chegondi429b6a72016-08-31 07:24:40 -070013251 num_kernel_contexts);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013252 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13253 }
13254 /*
Jubin John0852d242016-04-12 11:30:08 -070013255 * User contexts:
13256 * - default to 1 user context per real (non-HT) CPU core if
13257 * num_user_contexts is negative
Mike Marciniszyn77241052015-07-30 15:17:43 -040013258 */
Sebastian Sanchez2ce6bf22015-12-11 08:44:48 -050013259 if (num_user_contexts < 0)
Jubin John0852d242016-04-12 11:30:08 -070013260 num_user_contexts =
Dennis Dalessandro41973442016-07-25 07:52:36 -070013261 cpumask_weight(&node_affinity.real_cpu_mask);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013262
13263 total_contexts = num_kernel_contexts + num_user_contexts;
13264
13265 /*
13266 * Adjust the counts given a global max.
13267 */
13268 if (total_contexts > dd->chip_rcv_contexts) {
13269 dd_dev_err(dd,
13270 "Reducing # user receive contexts to: %d, from %d\n",
13271 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13272 (int)num_user_contexts);
13273 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13274 /* recalculate */
13275 total_contexts = num_kernel_contexts + num_user_contexts;
13276 }
13277
Dean Luick8f000f72016-04-12 11:32:06 -070013278 /* each user context requires an entry in the RMT */
13279 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13280 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13281 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13282 dd_dev_err(dd,
13283 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13284 (int)num_user_contexts,
13285 user_rmt_reduced);
13286 /* recalculate */
13287 num_user_contexts = user_rmt_reduced;
13288 total_contexts = num_kernel_contexts + num_user_contexts;
13289 }
13290
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013291 /* Accommodate VNIC contexts */
13292 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13293 total_contexts += HFI1_NUM_VNIC_CTXT;
13294
13295 /* the first N are kernel contexts, the rest are user/vnic contexts */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013296 dd->num_rcv_contexts = total_contexts;
13297 dd->n_krcv_queues = num_kernel_contexts;
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013298 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080013299 dd->num_user_contexts = num_user_contexts;
Mike Marciniszyn77241052015-07-30 15:17:43 -040013300 dd->freectxts = num_user_contexts;
13301 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013302 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13303 (int)dd->chip_rcv_contexts,
13304 (int)dd->num_rcv_contexts,
13305 (int)dd->n_krcv_queues,
13306 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013307
13308 /*
13309 * Receive array allocation:
13310 * All RcvArray entries are divided into groups of 8. This
13311 * is required by the hardware and will speed up writes to
13312 * consecutive entries by using write-combining of the entire
13313 * cacheline.
13314 *
13315	 * The groups are divided evenly among all contexts; any
13316	 * leftover groups are given to the first N user
13317	 * contexts.
13318 */
13319 dd->rcv_entries.group_size = RCV_INCREMENT;
13320 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13321 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13322 dd->rcv_entries.nctxt_extra = ngroups -
13323 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13324 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13325 dd->rcv_entries.ngroups,
13326 dd->rcv_entries.nctxt_extra);
13327 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13328 MAX_EAGER_ENTRIES * 2) {
13329 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13330 dd->rcv_entries.group_size;
13331 dd_dev_info(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013332 "RcvArray group count too high, change to %u\n",
13333 dd->rcv_entries.ngroups);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013334 dd->rcv_entries.nctxt_extra = 0;
13335 }
13336 /*
13337 * PIO send contexts
13338 */
13339 ret = init_sc_pools_and_sizes(dd);
13340 if (ret >= 0) { /* success */
13341 dd->num_send_contexts = ret;
13342 dd_dev_info(
13343 dd,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013344 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040013345 dd->chip_send_contexts,
13346 dd->num_send_contexts,
13347 dd->sc_sizes[SC_KERNEL].count,
13348 dd->sc_sizes[SC_ACK].count,
Jianxin Xiong44306f12016-04-12 11:30:28 -070013349 dd->sc_sizes[SC_USER].count,
13350 dd->sc_sizes[SC_VL15].count);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013351 ret = 0; /* success */
13352 }
13353
13354 return ret;
13355}
13356
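/*
 * Illustrative sketch (not part of the driver): RcvArray entries are handed
 * out in groups of dd->rcv_entries.group_size (RCV_INCREMENT) entries.  The
 * groups are split evenly across all receive contexts and the remainder is
 * recorded so the first few contexts can each take one extra group.  This
 * hypothetical helper restates the arithmetic used above (assumes nctxts is
 * non-zero).
 */
static inline void example_split_rcv_groups(u32 array_count, u32 group_size,
					    u32 nctxts, u32 *per_ctxt,
					    u32 *extra)
{
	u32 ngroups = array_count / group_size;	/* total groups available */

	*per_ctxt = ngroups / nctxts;		/* even share per context */
	*extra = ngroups - (nctxts * *per_ctxt); /* leftover groups */
}
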
13357/*
13358 * Set the device/port partition key table. The MAD code
13359 * will ensure that, at least, the partial management
13360 * partition key is present in the table.
13361 */
13362static void set_partition_keys(struct hfi1_pportdata *ppd)
13363{
13364 struct hfi1_devdata *dd = ppd->dd;
13365 u64 reg = 0;
13366 int i;
13367
13368 dd_dev_info(dd, "Setting partition keys\n");
13369 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13370 reg |= (ppd->pkeys[i] &
13371 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13372 ((i % 4) *
13373 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13374 /* Each register holds 4 PKey values. */
13375 if ((i % 4) == 3) {
13376 write_csr(dd, RCV_PARTITION_KEY +
13377 ((i - 3) * 2), reg);
13378 reg = 0;
13379 }
13380 }
13381
13382 /* Always enable HW pkeys check when pkeys table is set */
13383 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13384}
13385
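/*
 * Illustrative sketch (not part of the driver): each RCV_PARTITION_KEY CSR
 * holds four 16-bit partition keys, so set_partition_keys() above packs four
 * keys into one 64-bit value before each write.  This hypothetical helper
 * shows the packing for one register's worth of keys, assuming 16-bit keys
 * laid out low to high.
 */
static inline u64 example_pack_pkeys(const u16 pkeys[4])
{
	u64 reg = 0;
	int i;

	for (i = 0; i < 4; i++)
		reg |= (u64)pkeys[i] << (i * 16);
	return reg;
}
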
13386/*
13387 * These CSRs and memories are uninitialized on reset and must be
13388 * written before reading to set the ECC/parity bits.
13389 *
13390	 * NOTE: All user context CSRs that are not mmapped write-only
13391 * (e.g. the TID flows) must be initialized even if the driver never
13392 * reads them.
13393 */
13394static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13395{
13396 int i, j;
13397
13398 /* CceIntMap */
13399 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013400 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013401
13402 /* SendCtxtCreditReturnAddr */
13403 for (i = 0; i < dd->chip_send_contexts; i++)
13404 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13405
13406 /* PIO Send buffers */
13407 /* SDMA Send buffers */
Jubin John4d114fd2016-02-14 20:21:43 -080013408 /*
13409 * These are not normally read, and (presently) have no method
13410 * to be read, so are not pre-initialized
13411 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013412
13413 /* RcvHdrAddr */
13414 /* RcvHdrTailAddr */
13415 /* RcvTidFlowTable */
13416 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13417 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13418 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13419 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
Jubin John8638b772016-02-14 20:19:24 -080013420 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013421 }
13422
13423 /* RcvArray */
13424 for (i = 0; i < dd->chip_rcv_array_count; i++)
Jubin John8638b772016-02-14 20:19:24 -080013425 write_csr(dd, RCV_ARRAY + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013426 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013427
13428 /* RcvQPMapTable */
13429 for (i = 0; i < 32; i++)
13430 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13431}
13432
13433/*
13434 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13435 */
13436static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13437 u64 ctrl_bits)
13438{
13439 unsigned long timeout;
13440 u64 reg;
13441
13442 /* is the condition present? */
13443 reg = read_csr(dd, CCE_STATUS);
13444 if ((reg & status_bits) == 0)
13445 return;
13446
13447 /* clear the condition */
13448 write_csr(dd, CCE_CTRL, ctrl_bits);
13449
13450 /* wait for the condition to clear */
13451 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13452 while (1) {
13453 reg = read_csr(dd, CCE_STATUS);
13454 if ((reg & status_bits) == 0)
13455 return;
13456 if (time_after(jiffies, timeout)) {
13457 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013458 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13459 status_bits, reg & status_bits);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013460 return;
13461 }
13462 udelay(1);
13463 }
13464}
13465
13466/* set CCE CSRs to chip reset defaults */
13467static void reset_cce_csrs(struct hfi1_devdata *dd)
13468{
13469 int i;
13470
13471 /* CCE_REVISION read-only */
13472 /* CCE_REVISION2 read-only */
13473 /* CCE_CTRL - bits clear automatically */
13474 /* CCE_STATUS read-only, use CceCtrl to clear */
13475 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13476 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13477 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13478 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13479 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13480 /* CCE_ERR_STATUS read-only */
13481 write_csr(dd, CCE_ERR_MASK, 0);
13482 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13483 /* CCE_ERR_FORCE leave alone */
13484 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13485 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13486 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13487 /* CCE_PCIE_CTRL leave alone */
13488 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13489 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13490 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
Jubin John17fb4f22016-02-14 20:21:52 -080013491 CCE_MSIX_TABLE_UPPER_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013492 }
13493 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13494 /* CCE_MSIX_PBA read-only */
13495 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13496 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13497 }
13498 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13499 write_csr(dd, CCE_INT_MAP, 0);
13500 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13501 /* CCE_INT_STATUS read-only */
13502 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13503 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13504 /* CCE_INT_FORCE leave alone */
13505 /* CCE_INT_BLOCKED read-only */
13506 }
13507 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13508 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13509}
13510
Mike Marciniszyn77241052015-07-30 15:17:43 -040013511/* set MISC CSRs to chip reset defaults */
13512static void reset_misc_csrs(struct hfi1_devdata *dd)
13513{
13514 int i;
13515
13516 for (i = 0; i < 32; i++) {
13517 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13518 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13519 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13520 }
Jubin John4d114fd2016-02-14 20:21:43 -080013521 /*
13522 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13523	 * only be written in 128-byte chunks
13524 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040013525 /* init RSA engine to clear lingering errors */
13526 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13527 write_csr(dd, MISC_CFG_RSA_MU, 0);
13528 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13529 /* MISC_STS_8051_DIGEST read-only */
13530 /* MISC_STS_SBM_DIGEST read-only */
13531 /* MISC_STS_PCIE_DIGEST read-only */
13532 /* MISC_STS_FAB_DIGEST read-only */
13533 /* MISC_ERR_STATUS read-only */
13534 write_csr(dd, MISC_ERR_MASK, 0);
13535 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13536 /* MISC_ERR_FORCE leave alone */
13537}
13538
13539/* set TXE CSRs to chip reset defaults */
13540static void reset_txe_csrs(struct hfi1_devdata *dd)
13541{
13542 int i;
13543
13544 /*
13545 * TXE Kernel CSRs
13546 */
13547 write_csr(dd, SEND_CTRL, 0);
13548 __cm_reset(dd, 0); /* reset CM internal state */
13549 /* SEND_CONTEXTS read-only */
13550 /* SEND_DMA_ENGINES read-only */
13551 /* SEND_PIO_MEM_SIZE read-only */
13552 /* SEND_DMA_MEM_SIZE read-only */
13553 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13554 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13555 /* SEND_PIO_ERR_STATUS read-only */
13556 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13557 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13558 /* SEND_PIO_ERR_FORCE leave alone */
13559 /* SEND_DMA_ERR_STATUS read-only */
13560 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13561 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13562 /* SEND_DMA_ERR_FORCE leave alone */
13563 /* SEND_EGRESS_ERR_STATUS read-only */
13564 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13565 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13566 /* SEND_EGRESS_ERR_FORCE leave alone */
13567 write_csr(dd, SEND_BTH_QP, 0);
13568 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13569 write_csr(dd, SEND_SC2VLT0, 0);
13570 write_csr(dd, SEND_SC2VLT1, 0);
13571 write_csr(dd, SEND_SC2VLT2, 0);
13572 write_csr(dd, SEND_SC2VLT3, 0);
13573 write_csr(dd, SEND_LEN_CHECK0, 0);
13574 write_csr(dd, SEND_LEN_CHECK1, 0);
13575 /* SEND_ERR_STATUS read-only */
13576 write_csr(dd, SEND_ERR_MASK, 0);
13577 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13578 /* SEND_ERR_FORCE read-only */
13579 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013580 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013581 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
Jubin John8638b772016-02-14 20:19:24 -080013582 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13583 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13584 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013585 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013586 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013587 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
Jubin John8638b772016-02-14 20:19:24 -080013588 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013589 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
Jubin John17fb4f22016-02-14 20:21:52 -080013590 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013591 /* SEND_CM_CREDIT_USED_STATUS read-only */
13592 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13593 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13594 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13595 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13596 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13597 for (i = 0; i < TXE_NUM_DATA_VL; i++)
Jubin John8638b772016-02-14 20:19:24 -080013598 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013599 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13600 /* SEND_CM_CREDIT_USED_VL read-only */
13601 /* SEND_CM_CREDIT_USED_VL15 read-only */
13602 /* SEND_EGRESS_CTXT_STATUS read-only */
13603 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13604 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13605 /* SEND_EGRESS_ERR_INFO read-only */
13606 /* SEND_EGRESS_ERR_SOURCE read-only */
13607
13608 /*
13609 * TXE Per-Context CSRs
13610 */
13611 for (i = 0; i < dd->chip_send_contexts; i++) {
13612 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13613 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13614 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13615 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13616 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13617 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13618 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13619 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13620 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13621 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13622 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13623 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13624 }
13625
13626 /*
13627 * TXE Per-SDMA CSRs
13628 */
13629 for (i = 0; i < dd->chip_sdma_engines; i++) {
13630 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13631 /* SEND_DMA_STATUS read-only */
13632 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13633 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13634 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13635 /* SEND_DMA_HEAD read-only */
13636 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13637 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13638 /* SEND_DMA_IDLE_CNT read-only */
13639 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13640 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13641 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13642 /* SEND_DMA_ENG_ERR_STATUS read-only */
13643 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13644 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13645 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13646 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13647 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13648 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13649 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13650 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13651 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13652 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13653 }
13654}
13655
13656/*
13657 * Expect on entry:
13658 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13659 */
13660static void init_rbufs(struct hfi1_devdata *dd)
13661{
13662 u64 reg;
13663 int count;
13664
13665 /*
13666 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13667 * clear.
13668 */
13669 count = 0;
13670 while (1) {
13671 reg = read_csr(dd, RCV_STATUS);
13672 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13673 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13674 break;
13675 /*
13676 * Give up after 1ms - maximum wait time.
13677 *
Harish Chegondie8a70af2016-09-25 07:42:01 -070013678 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
Mike Marciniszyn77241052015-07-30 15:17:43 -040013679 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
Harish Chegondie8a70af2016-09-25 07:42:01 -070013680 * 136 KB / (66% * 250MB/s) = 844us
Mike Marciniszyn77241052015-07-30 15:17:43 -040013681 */
13682 if (count++ > 500) {
13683 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013684 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13685 __func__, reg);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013686 break;
13687 }
13688 udelay(2); /* do not busy-wait the CSR */
13689 }
13690
13691 /* start the init - expect RcvCtrl to be 0 */
13692 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13693
13694 /*
13695 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13696 * period after the write before RcvStatus.RxRbufInitDone is valid.
13697 * The delay in the first run through the loop below is sufficient and
13698 * required before the first read of RcvStatus.RxRbufInitDone.
13699 */
13700 read_csr(dd, RCV_CTRL);
13701
13702 /* wait for the init to finish */
13703 count = 0;
13704 while (1) {
13705 /* delay is required first time through - see above */
13706 udelay(2); /* do not busy-wait the CSR */
13707 reg = read_csr(dd, RCV_STATUS);
13708 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13709 break;
13710
13711 /* give up after 100us - slowest possible at 33MHz is 73us */
13712 if (count++ > 50) {
13713 dd_dev_err(dd,
Jubin John17fb4f22016-02-14 20:21:52 -080013714 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13715 __func__);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013716 break;
13717 }
13718 }
13719}
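
/*
 * Rough sanity check of the poll budgets above, using only the figures
 * already quoted in the init_rbufs() comments (illustrative arithmetic,
 * not driver logic):
 *
 *   DMA drain:  136 KiB / (66% * 250 MB/s) = 139264 / 165e6 s ~= 844 us,
 *               while the loop allows 500 iterations * 2 us = 1000 us.
 *   RBuf init:  worst case ~73 us at 33 MHz,
 *               while the loop allows 50 iterations * 2 us = 100 us.
 *
 * In both cases the polling budget comfortably covers the slowest
 * documented case before the "continuing" warning is emitted.
 */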
13720
13721/* set RXE CSRs to chip reset defaults */
13722static void reset_rxe_csrs(struct hfi1_devdata *dd)
13723{
13724 int i, j;
13725
13726 /*
13727 * RXE Kernel CSRs
13728 */
13729 write_csr(dd, RCV_CTRL, 0);
13730 init_rbufs(dd);
13731 /* RCV_STATUS read-only */
13732 /* RCV_CONTEXTS read-only */
13733 /* RCV_ARRAY_CNT read-only */
13734 /* RCV_BUF_SIZE read-only */
13735 write_csr(dd, RCV_BTH_QP, 0);
13736 write_csr(dd, RCV_MULTICAST, 0);
13737 write_csr(dd, RCV_BYPASS, 0);
13738 write_csr(dd, RCV_VL15, 0);
13739 /* this is a clear-down */
13740 write_csr(dd, RCV_ERR_INFO,
Jubin John17fb4f22016-02-14 20:21:52 -080013741 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013742 /* RCV_ERR_STATUS read-only */
13743 write_csr(dd, RCV_ERR_MASK, 0);
13744 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13745 /* RCV_ERR_FORCE leave alone */
13746 for (i = 0; i < 32; i++)
13747 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13748 for (i = 0; i < 4; i++)
13749 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13750 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13751 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13752 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13753 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070013754 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13755 clear_rsm_rule(dd, i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013756 for (i = 0; i < 32; i++)
13757 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13758
13759 /*
13760 * RXE Kernel and User Per-Context CSRs
13761 */
13762 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13763 /* kernel */
13764 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13765 /* RCV_CTXT_STATUS read-only */
13766 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13767 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13768 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13769 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13770 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13771 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13772 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13773 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13774 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13775 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13776
13777 /* user */
13778 /* RCV_HDR_TAIL read-only */
13779 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13780 /* RCV_EGR_INDEX_TAIL read-only */
13781 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13782 /* RCV_EGR_OFFSET_TAIL read-only */
13783 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
Jubin John17fb4f22016-02-14 20:21:52 -080013784 write_uctxt_csr(dd, i,
13785 RCV_TID_FLOW_TABLE + (8 * j), 0);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013786 }
13787 }
13788}
13789
13790/*
13791 * Set sc2vl tables.
13792 *
13793 * They power on to zeros, so to avoid send context errors
13794 * they need to be set:
13795 *
13796 * SC 0-7 -> VL 0-7 (respectively)
13797 * SC 15 -> VL 15
13798 * otherwise
13799 * -> VL 0
13800 */
13801static void init_sc2vl_tables(struct hfi1_devdata *dd)
13802{
13803 int i;
13804 /* init per architecture spec, constrained by hardware capability */
13805
13806 /* HFI maps sent packets */
13807 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13808 0,
13809 0, 0, 1, 1,
13810 2, 2, 3, 3,
13811 4, 4, 5, 5,
13812 6, 6, 7, 7));
13813 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13814 1,
13815 8, 0, 9, 0,
13816 10, 0, 11, 0,
13817 12, 0, 13, 0,
13818 14, 0, 15, 15));
13819 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13820 2,
13821 16, 0, 17, 0,
13822 18, 0, 19, 0,
13823 20, 0, 21, 0,
13824 22, 0, 23, 0));
13825 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13826 3,
13827 24, 0, 25, 0,
13828 26, 0, 27, 0,
13829 28, 0, 29, 0,
13830 30, 0, 31, 0));
13831
13832 /* DC maps received packets */
13833 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13834 15_0,
13835 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13836 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13837 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13838 31_16,
13839 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13840 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13841
13842 /* initialize the cached sc2vl values consistently with h/w */
13843 for (i = 0; i < 32; i++) {
13844 if (i < 8 || i == 15)
13845 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13846 else
13847 *((u8 *)(dd->sc2vl) + i) = 0;
13848 }
13849}
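
/*
 * For illustration, the cached dd->sc2vl byte array set up above ends up
 * as (index = SC, value = VL):
 *
 *   SC:  0  1  2  3  4  5  6  7  8 ... 14 15 16 ... 31
 *   VL:  0  1  2  3  4  5  6  7  0 ...  0 15  0 ...  0
 *
 * which mirrors the SEND_SC2VLT* and DCC_CFG_SC_VL_TABLE_* programming.
 */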
13850
13851/*
13852 * Read chip sizes and then reset parts to sane, disabled values. We cannot
13853 * depend on the chip going through a power-on reset - a driver may be loaded
13854 * and unloaded many times.
13855 *
13856 * Do not write any CSR values to the chip in this routine - there may be
13857 * a reset following the (possible) FLR in this routine.
13858 *
13859 */
13860static void init_chip(struct hfi1_devdata *dd)
13861{
13862 int i;
13863
13864 /*
13865 * Put the HFI CSRs in a known state.
13866 * Combine this with a DC reset.
13867 *
13868 * Stop the device from doing anything while we do a
13869 * reset. We know there are no other active users of
13870 * the device since we are now in charge. Turn off
13871 * all outbound and inbound traffic and make sure
13872 * the device does not generate any interrupts.
13873 */
13874
13875 /* disable send contexts and SDMA engines */
13876 write_csr(dd, SEND_CTRL, 0);
13877 for (i = 0; i < dd->chip_send_contexts; i++)
13878 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13879 for (i = 0; i < dd->chip_sdma_engines; i++)
13880 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13881 /* disable port (turn off RXE inbound traffic) and contexts */
13882 write_csr(dd, RCV_CTRL, 0);
13883 for (i = 0; i < dd->chip_rcv_contexts; i++)
13884 write_csr(dd, RCV_CTXT_CTRL, 0);
13885 /* mask all interrupt sources */
13886 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
Jubin John8638b772016-02-14 20:19:24 -080013887 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013888
13889 /*
13890 * DC Reset: do a full DC reset before the register clear.
13891 * A recommended length of time to hold is one CSR read,
13892 * so reread the CceDcCtrl. Then, hold the DC in reset
13893 * across the clear.
13894 */
13895 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
Jubin John50e5dcb2016-02-14 20:19:41 -080013896 (void)read_csr(dd, CCE_DC_CTRL);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013897
13898 if (use_flr) {
13899 /*
13900 * A FLR will reset the SPC core and part of the PCIe.
13901 * The parts that need to be restored have already been
13902 * saved.
13903 */
13904 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13905
13906 /* do the FLR, the DC reset will remain */
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013907 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013908
13909 /* restore command and BARs */
13910 restore_pci_variables(dd);
13911
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013912 if (is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040013913 dd_dev_info(dd, "Resetting CSRs with FLR\n");
Christoph Hellwig21c433a2017-04-25 14:36:19 -050013914 pcie_flr(dd->pcidev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013915 restore_pci_variables(dd);
13916 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040013917 } else {
13918 dd_dev_info(dd, "Resetting CSRs with writes\n");
13919 reset_cce_csrs(dd);
13920 reset_txe_csrs(dd);
13921 reset_rxe_csrs(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013922 reset_misc_csrs(dd);
13923 }
13924 /* clear the DC reset */
13925 write_csr(dd, CCE_DC_CTRL, 0);
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040013926
Mike Marciniszyn77241052015-07-30 15:17:43 -040013927 /* Set the LED off */
Sebastian Sanchez773d04512016-02-09 14:29:40 -080013928 setextled(dd, 0);
13929
Mike Marciniszyn77241052015-07-30 15:17:43 -040013930 /*
13931 * Clear the QSFP reset.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013932 * An FLR enforces a 0 on all out pins. The driver does not touch
Mike Marciniszyn77241052015-07-30 15:17:43 -040013933 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013934 * anything plugged in constantly in reset, if it pays attention
Mike Marciniszyn77241052015-07-30 15:17:43 -040013935 * to RESET_N.
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013936 * Prime examples of this are optical cables. Set all pins high.
Mike Marciniszyn77241052015-07-30 15:17:43 -040013937 * I2CCLK and I2CDAT will change per direction, and INT_N and
13938 * MODPRS_N are input only and their value is ignored.
13939 */
Easwar Hariharan72a67ba2015-11-06 20:06:57 -050013940 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13941 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
Dean Luicka2ee27a2016-03-05 08:49:50 -080013942 init_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013943}
13944
13945static void init_early_variables(struct hfi1_devdata *dd)
13946{
13947 int i;
13948
13949 /* assign link credit variables */
13950 dd->vau = CM_VAU;
13951 dd->link_credits = CM_GLOBAL_CREDITS;
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050013952 if (is_ax(dd))
Mike Marciniszyn77241052015-07-30 15:17:43 -040013953 dd->link_credits--;
13954 dd->vcu = cu_to_vcu(hfi1_cu);
13955 /* enough room for 8 MAD packets plus header - 17K */
13956 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13957 if (dd->vl15_init > dd->link_credits)
13958 dd->vl15_init = dd->link_credits;
13959
13960 write_uninitialized_csrs_and_memories(dd);
13961
13962 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13963 for (i = 0; i < dd->num_pports; i++) {
13964 struct hfi1_pportdata *ppd = &dd->pport[i];
13965
13966 set_partition_keys(ppd);
13967 }
13968 init_sc2vl_tables(dd);
13969}
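
/*
 * Worked example of the vl15_init sizing above: 8 MAD packets of 2048
 * bytes payload plus a 128 byte header each is 8 * (2048 + 128) = 17408
 * bytes (~17K). If vau_to_au(dd->vau) yields 64-byte allocation units
 * (an assumption for illustration; the real value depends on CM_VAU),
 * that is 17408 / 64 = 272 credits, clamped to dd->link_credits.
 */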
13970
13971static void init_kdeth_qp(struct hfi1_devdata *dd)
13972{
13973 /* user changed the KDETH_QP */
13974 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13975 /* out of range or illegal value */
13976 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13977 kdeth_qp = 0;
13978 }
13979 if (kdeth_qp == 0) /* not set, or failed range check */
13980 kdeth_qp = DEFAULT_KDETH_QP;
13981
13982 write_csr(dd, SEND_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013983 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13984 SEND_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013985
13986 write_csr(dd, RCV_BTH_QP,
Jubin John17fb4f22016-02-14 20:21:52 -080013987 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13988 RCV_BTH_QP_KDETH_QP_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040013989}
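
/*
 * Illustration of the range check above: kdeth_qp values 0x01-0xfe are
 * accepted as the KDETH QP prefix; 0, or anything >= 0xff, falls back to
 * DEFAULT_KDETH_QP. The accepted prefix is then written, masked and
 * shifted, into both SEND_BTH_QP and RCV_BTH_QP so that the send and
 * receive sides agree on which BTH QPs carry KDETH traffic.
 */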
13990
13991/**
13992 * init_qpmap_table - init the qpn mapping table
13993 * @dd - device data
13994 * @first_ctxt - first context
13995 * @last_ctxt - last context
13996 *
13997 * This routine sets the qpn mapping table that
13998 * is indexed by qpn[8:1].
13999 *
14000 * The routine will round robin the 256 settings
14001 * from first_ctxt to last_ctxt.
14002 *
14003 * The first/last looks ahead to having specialized
14004 * receive contexts for mgmt and bypass. Normal
14005 * verbs traffic is assumed to be on a range
14006 * of receive contexts.
14007 */
14008static void init_qpmap_table(struct hfi1_devdata *dd,
14009 u32 first_ctxt,
14010 u32 last_ctxt)
14011{
14012 u64 reg = 0;
14013 u64 regno = RCV_QP_MAP_TABLE;
14014 int i;
14015 u64 ctxt = first_ctxt;
14016
Dean Luick60d585ad2016-04-12 10:50:35 -070014017 for (i = 0; i < 256; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014018 reg |= ctxt << (8 * (i % 8));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014019 ctxt++;
14020 if (ctxt > last_ctxt)
14021 ctxt = first_ctxt;
Dean Luick60d585ad2016-04-12 10:50:35 -070014022 if (i % 8 == 7) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014023 write_csr(dd, regno, reg);
14024 reg = 0;
14025 regno += 8;
14026 }
14027 }
Mike Marciniszyn77241052015-07-30 15:17:43 -040014028
14029 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14030 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14031}
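
/*
 * Packing example for the loop above (illustrative values only): with
 * first_ctxt = 1 and last_ctxt = 3, the 256 one-byte entries cycle
 * 1, 2, 3, 1, 2, 3, ... and every 8 entries are packed least-significant
 * byte first into one 64-bit CSR, so the first RCV_QP_MAP_TABLE register
 * becomes
 *
 *   reg = 0x0201030201030201
 *
 * (byte 0 = entry for qpn[8:1] == 0, byte 7 = entry for qpn[8:1] == 7).
 */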
14032
Dean Luick372cc85a2016-04-12 11:30:51 -070014033struct rsm_map_table {
14034 u64 map[NUM_MAP_REGS];
14035 unsigned int used;
14036};
14037
Dean Luickb12349a2016-04-12 11:31:33 -070014038struct rsm_rule_data {
14039 u8 offset;
14040 u8 pkt_type;
14041 u32 field1_off;
14042 u32 field2_off;
14043 u32 index1_off;
14044 u32 index1_width;
14045 u32 index2_off;
14046 u32 index2_width;
14047 u32 mask1;
14048 u32 value1;
14049 u32 mask2;
14050 u32 value2;
14051};
14052
Dean Luick372cc85a2016-04-12 11:30:51 -070014053/*
14054 * Return an initialized RMT map table for users to fill in. OK if it
14055 * returns NULL, indicating no table.
14056 */
14057static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14058{
14059 struct rsm_map_table *rmt;
14060 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14061
14062 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14063 if (rmt) {
14064 memset(rmt->map, rxcontext, sizeof(rmt->map));
14065 rmt->used = 0;
14066 }
14067
14068 return rmt;
14069}
14070
14071/*
14072 * Write the final RMT map table to the chip and free the table. OK if
14073 * table is NULL.
14074 */
14075static void complete_rsm_map_table(struct hfi1_devdata *dd,
14076 struct rsm_map_table *rmt)
14077{
14078 int i;
14079
14080 if (rmt) {
14081 /* write table to chip */
14082 for (i = 0; i < NUM_MAP_REGS; i++)
14083 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14084
14085 /* enable RSM */
14086 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14087 }
14088}
14089
Dean Luickb12349a2016-04-12 11:31:33 -070014090/*
14091 * Add a receive side mapping rule.
14092 */
14093static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14094 struct rsm_rule_data *rrd)
14095{
14096 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14097 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14098 1ull << rule_index | /* enable bit */
14099 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14100 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14101 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14102 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14103 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14104 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14105 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14106 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14107 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14108 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14109 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14110 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14111 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14112}
14113
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014114/*
14115 * Clear a receive side mapping rule.
14116 */
14117static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14118{
14119 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14120 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14121 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14122}
14123
Dean Luick4a818be2016-04-12 11:31:11 -070014124/* return the number of RSM map table entries that will be used for QOS */
14125static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14126 unsigned int *np)
14127{
14128 int i;
14129 unsigned int m, n;
14130 u8 max_by_vl = 0;
14131
14132 /* is QOS active at all? */
14133 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14134 num_vls == 1 ||
14135 krcvqsset <= 1)
14136 goto no_qos;
14137
14138 /* determine bits for qpn */
14139 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14140 if (krcvqs[i] > max_by_vl)
14141 max_by_vl = krcvqs[i];
14142 if (max_by_vl > 32)
14143 goto no_qos;
14144 m = ilog2(__roundup_pow_of_two(max_by_vl));
14145
14146 /* determine bits for vl */
14147 n = ilog2(__roundup_pow_of_two(num_vls));
14148
14149 /* reject if too much is used */
14150 if ((m + n) > 7)
14151 goto no_qos;
14152
14153 if (mp)
14154 *mp = m;
14155 if (np)
14156 *np = n;
14157
14158 return 1 << (m + n);
14159
14160no_qos:
14161 if (mp)
14162 *mp = 0;
14163 if (np)
14164 *np = 0;
14165 return 0;
14166}
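
/*
 * Worked example for qos_rmt_entries() (illustrative values only): with
 * num_vls = 3, krcvqsset = 3 and krcvqs[] = {4, 2, 2}, the largest
 * per-VL queue count is 4, so m = ilog2(roundup_pow_of_two(4)) = 2 and
 * n = ilog2(roundup_pow_of_two(3)) = 2. Since m + n = 4 <= 7, QOS is
 * possible and the rule will consume 1 << (m + n) = 16 RSM map entries.
 */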
14167
Mike Marciniszyn77241052015-07-30 15:17:43 -040014168/**
14169 * init_qos - init RX qos
14170 * @dd - device data
Dean Luick372cc85a2016-04-12 11:30:51 -070014171 * @rmt - RSM map table
Mike Marciniszyn77241052015-07-30 15:17:43 -040014172 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014173 * This routine initializes Rule 0 and the RSM map table to implement
14174 * quality of service (qos).
Mike Marciniszyn77241052015-07-30 15:17:43 -040014175 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014176 * If all of the limit tests succeed, qos is applied based on the array
14177 * interpretation of krcvqs where entry 0 is VL0.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014178 *
Dean Luick33a9eb52016-04-12 10:50:22 -070014179 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14180 * feed both the RSM map table and the single rule.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014181 */
Dean Luick372cc85a2016-04-12 11:30:51 -070014182static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014183{
Dean Luickb12349a2016-04-12 11:31:33 -070014184 struct rsm_rule_data rrd;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014185 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
Dean Luick372cc85a2016-04-12 11:30:51 -070014186 unsigned int rmt_entries;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014187 u64 reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014188
Dean Luick4a818be2016-04-12 11:31:11 -070014189 if (!rmt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014190 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014191 rmt_entries = qos_rmt_entries(dd, &m, &n);
14192 if (rmt_entries == 0)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014193 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014194 qpns_per_vl = 1 << m;
14195
Dean Luick372cc85a2016-04-12 11:30:51 -070014196 /* enough room in the map table? */
14197 rmt_entries = 1 << (m + n);
14198 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
Easwar Hariharan859bcad2015-12-10 11:13:38 -050014199 goto bail;
Dean Luick4a818be2016-04-12 11:31:11 -070014200
Dean Luick372cc85a2016-04-12 11:30:51 -070014201	/* add qos entries to the RSM map table */
Dean Luick33a9eb52016-04-12 10:50:22 -070014202 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014203 unsigned tctxt;
14204
14205 for (qpn = 0, tctxt = ctxt;
14206 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14207 unsigned idx, regoff, regidx;
14208
Dean Luick372cc85a2016-04-12 11:30:51 -070014209 /* generate the index the hardware will produce */
14210 idx = rmt->used + ((qpn << n) ^ i);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014211 regoff = (idx % 8) * 8;
14212 regidx = idx / 8;
Dean Luick372cc85a2016-04-12 11:30:51 -070014213 /* replace default with context number */
14214 reg = rmt->map[regidx];
Mike Marciniszyn77241052015-07-30 15:17:43 -040014215 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14216 << regoff);
14217 reg |= (u64)(tctxt++) << regoff;
Dean Luick372cc85a2016-04-12 11:30:51 -070014218 rmt->map[regidx] = reg;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014219 if (tctxt == ctxt + krcvqs[i])
14220 tctxt = ctxt;
14221 }
14222 ctxt += krcvqs[i];
14223 }
Dean Luickb12349a2016-04-12 11:31:33 -070014224
14225 rrd.offset = rmt->used;
14226 rrd.pkt_type = 2;
14227 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14228 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14229 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14230 rrd.index1_width = n;
14231 rrd.index2_off = QPN_SELECT_OFFSET;
14232 rrd.index2_width = m + n;
14233 rrd.mask1 = LRH_BTH_MASK;
14234 rrd.value1 = LRH_BTH_VALUE;
14235 rrd.mask2 = LRH_SC_MASK;
14236 rrd.value2 = LRH_SC_VALUE;
14237
14238 /* add rule 0 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014239 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
Dean Luickb12349a2016-04-12 11:31:33 -070014240
Dean Luick372cc85a2016-04-12 11:30:51 -070014241 /* mark RSM map entries as used */
14242 rmt->used += rmt_entries;
Dean Luick33a9eb52016-04-12 10:50:22 -070014243 /* map everything else to the mcast/err/vl15 context */
14244 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014245 dd->qos_shift = n + 1;
14246 return;
14247bail:
14248 dd->qos_shift = 1;
Niranjana Vishwanathapura82c26112015-11-11 00:35:19 -050014249 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014250}
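
/*
 * Index example for the map fill above (illustrative values only): with
 * n = 2 VL bits, VL i = 1 and qpn = 3, the hardware-produced index is
 * rmt->used + ((3 << 2) ^ 1) = rmt->used + 13; that entry's byte in
 * rmt->map[] is replaced with the next kernel receive context assigned
 * to VL 1, wrapping within that VL's krcvqs[] allocation.
 */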
14251
Dean Luick8f000f72016-04-12 11:32:06 -070014252static void init_user_fecn_handling(struct hfi1_devdata *dd,
14253 struct rsm_map_table *rmt)
14254{
14255 struct rsm_rule_data rrd;
14256 u64 reg;
14257 int i, idx, regoff, regidx;
14258 u8 offset;
14259
14260 /* there needs to be enough room in the map table */
14261 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14262 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14263 return;
14264 }
14265
14266 /*
14267 * RSM will extract the destination context as an index into the
14268 * map table. The destination contexts are a sequential block
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014269 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
Dean Luick8f000f72016-04-12 11:32:06 -070014270 * Map entries are accessed as offset + extracted value. Adjust
14271 * the added offset so this sequence can be placed anywhere in
14272 * the table - as long as the entries themselves do not wrap.
14273 * There are only enough bits in offset for the table size, so
14274 * start with that to allow for a "negative" offset.
14275 */
14276 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014277 (int)dd->first_dyn_alloc_ctxt);
Dean Luick8f000f72016-04-12 11:32:06 -070014278
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014279 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
Dean Luick8f000f72016-04-12 11:32:06 -070014280 i < dd->num_rcv_contexts; i++, idx++) {
14281 /* replace with identity mapping */
14282 regoff = (idx % 8) * 8;
14283 regidx = idx / 8;
14284 reg = rmt->map[regidx];
14285 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14286 reg |= (u64)i << regoff;
14287 rmt->map[regidx] = reg;
14288 }
14289
14290 /*
14291 * For RSM intercept of Expected FECN packets:
14292 * o packet type 0 - expected
14293 * o match on F (bit 95), using select/match 1, and
14294 * o match on SH (bit 133), using select/match 2.
14295 *
14296 * Use index 1 to extract the 8-bit receive context from DestQP
14297 * (start at bit 64). Use that as the RSM map table index.
14298 */
14299 rrd.offset = offset;
14300 rrd.pkt_type = 0;
14301 rrd.field1_off = 95;
14302 rrd.field2_off = 133;
14303 rrd.index1_off = 64;
14304 rrd.index1_width = 8;
14305 rrd.index2_off = 0;
14306 rrd.index2_width = 0;
14307 rrd.mask1 = 1;
14308 rrd.value1 = 1;
14309 rrd.mask2 = 1;
14310 rrd.value2 = 1;
14311
14312 /* add rule 1 */
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014313 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
Dean Luick8f000f72016-04-12 11:32:06 -070014314
14315 rmt->used += dd->num_user_contexts;
14316}
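
/*
 * Offset example for the rule above (illustrative values only, assuming
 * NUM_MAP_ENTRIES is 256, i.e. 32 registers of 8 entries): with
 * rmt->used = 16 and first_dyn_alloc_ctxt = 3, offset = (u8)(256 + 16 - 3)
 * = 13 after the 8-bit wrap. A FECN packet whose DestQP selects receive
 * context 5 then lands on map entry 13 + 5 = 18, i.e.
 * rmt->used + (5 - first_dyn_alloc_ctxt), which is exactly the identity
 * entry written for that context in the loop above.
 */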
14317
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014318/* Initialize RSM for VNIC */
14319void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14320{
14321 u8 i, j;
14322 u8 ctx_id = 0;
14323 u64 reg;
14324 u32 regoff;
14325 struct rsm_rule_data rrd;
14326
14327 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14328 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14329 dd->vnic.rmt_start);
14330 return;
14331 }
14332
14333 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14334 dd->vnic.rmt_start,
14335 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14336
14337 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14338 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14339 reg = read_csr(dd, regoff);
14340 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14341 /* Update map register with vnic context */
14342 j = (dd->vnic.rmt_start + i) % 8;
14343 reg &= ~(0xffllu << (j * 8));
14344 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14345 /* Wrap up vnic ctx index */
14346 ctx_id %= dd->vnic.num_ctxt;
14347 /* Write back map register */
14348 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14349 dev_dbg(&(dd)->pcidev->dev,
14350 "Vnic rsm map reg[%d] =0x%llx\n",
14351 regoff - RCV_RSM_MAP_TABLE, reg);
14352
14353 write_csr(dd, regoff, reg);
14354 regoff += 8;
14355 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14356 reg = read_csr(dd, regoff);
14357 }
14358 }
14359
14360 /* Add rule for vnic */
14361 rrd.offset = dd->vnic.rmt_start;
14362 rrd.pkt_type = 4;
14363 /* Match 16B packets */
14364 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14365 rrd.mask1 = L2_TYPE_MASK;
14366 rrd.value1 = L2_16B_VALUE;
14367 /* Match ETH L4 packets */
14368 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14369 rrd.mask2 = L4_16B_TYPE_MASK;
14370 rrd.value2 = L4_16B_ETH_VALUE;
14371 /* Calc context from veswid and entropy */
14372 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14373 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14374 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14375 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14376 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14377
14378 /* Enable RSM if not already enabled */
14379 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14380}
14381
14382void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14383{
14384 clear_rsm_rule(dd, RSM_INS_VNIC);
14385
14386 /* Disable RSM if used only by vnic */
14387 if (dd->vnic.rmt_start == 0)
14388 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14389}
14390
Mike Marciniszyn77241052015-07-30 15:17:43 -040014391static void init_rxe(struct hfi1_devdata *dd)
14392{
Dean Luick372cc85a2016-04-12 11:30:51 -070014393 struct rsm_map_table *rmt;
14394
Mike Marciniszyn77241052015-07-30 15:17:43 -040014395 /* enable all receive errors */
14396 write_csr(dd, RCV_ERR_MASK, ~0ull);
Dean Luick372cc85a2016-04-12 11:30:51 -070014397
14398 rmt = alloc_rsm_map_table(dd);
14399 /* set up QOS, including the QPN map table */
14400 init_qos(dd, rmt);
Dean Luick8f000f72016-04-12 11:32:06 -070014401 init_user_fecn_handling(dd, rmt);
Dean Luick372cc85a2016-04-12 11:30:51 -070014402 complete_rsm_map_table(dd, rmt);
Vishwanathapura, Niranjana22807402017-04-12 20:29:29 -070014403 /* record number of used rsm map entries for vnic */
14404 dd->vnic.rmt_start = rmt->used;
Dean Luick372cc85a2016-04-12 11:30:51 -070014405 kfree(rmt);
14406
Mike Marciniszyn77241052015-07-30 15:17:43 -040014407 /*
14408 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14409 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14410 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14411 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14412 * Max_PayLoad_Size set to its minimum of 128.
14413 *
14414 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14415 * (64 bytes). Max_Payload_Size is possibly modified upward in
14416 * tune_pcie_caps() which is called after this routine.
14417 */
14418}
14419
14420static void init_other(struct hfi1_devdata *dd)
14421{
14422 /* enable all CCE errors */
14423 write_csr(dd, CCE_ERR_MASK, ~0ull);
14424 /* enable *some* Misc errors */
14425 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14426 /* enable all DC errors, except LCB */
14427 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14428 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14429}
14430
14431/*
14432 * Fill out the given AU table using the given CU. A CU is defined in terms
14433 * of AUs. The table is an encoding: given the index, how many AUs does that
14434 * represent?
14435 *
14436 * NOTE: Assumes that the register layout is the same for the
14437 * local and remote tables.
14438 */
14439static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14440 u32 csr0to3, u32 csr4to7)
14441{
14442 write_csr(dd, csr0to3,
Jubin John17fb4f22016-02-14 20:21:52 -080014443 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14444 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14445 2ull * cu <<
14446 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14447 4ull * cu <<
14448 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014449 write_csr(dd, csr4to7,
Jubin John17fb4f22016-02-14 20:21:52 -080014450 8ull * cu <<
14451 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14452 16ull * cu <<
14453 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14454 32ull * cu <<
14455 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14456 64ull * cu <<
14457 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014458}
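
/*
 * Encoding example for the AU table above (illustrative, assuming
 * vcu_to_cu() is a simple power-of-two expansion): with cu = 1 the eight
 * table entries encode {0, 1, 2, 4, 8, 16, 32, 64} AUs; entry 0 encodes
 * no credit, entry 1 is one AU, and each later entry doubles, scaled by
 * the CU in use.
 */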
14459
14460static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14461{
14462 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014463 SEND_CM_LOCAL_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014464}
14465
14466void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14467{
14468 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
Jubin John17fb4f22016-02-14 20:21:52 -080014469 SEND_CM_REMOTE_AU_TABLE4_TO7);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014470}
14471
14472static void init_txe(struct hfi1_devdata *dd)
14473{
14474 int i;
14475
14476 /* enable all PIO, SDMA, general, and Egress errors */
14477 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14478 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14479 write_csr(dd, SEND_ERR_MASK, ~0ull);
14480 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14481
14482 /* enable all per-context and per-SDMA engine errors */
14483 for (i = 0; i < dd->chip_send_contexts; i++)
14484 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14485 for (i = 0; i < dd->chip_sdma_engines; i++)
14486 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14487
14488 /* set the local CU to AU mapping */
14489 assign_local_cm_au_table(dd, dd->vcu);
14490
14491 /*
14492 * Set reasonable default for Credit Return Timer
14493 * Don't set on Simulator - causes it to choke.
14494 */
14495 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14496 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14497}
14498
14499int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
14500{
14501 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14502 unsigned sctxt;
14503 int ret = 0;
14504 u64 reg;
14505
14506 if (!rcd || !rcd->sc) {
14507 ret = -EINVAL;
14508 goto done;
14509 }
14510 sctxt = rcd->sc->hw_context;
14511 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14512 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14513 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14514 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14515 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14516 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14517 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14518 /*
14519 * Enable send-side J_KEY integrity check, unless this is A0 h/w
Mike Marciniszyn77241052015-07-30 15:17:43 -040014520 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014521 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014522 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14523 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14524 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14525 }
14526
14527 /* Enable J_KEY check on receive context. */
14528 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14529 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14530 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14531 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14532done:
14533 return ret;
14534}
14535
14536int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14537{
14538 struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14539 unsigned sctxt;
14540 int ret = 0;
14541 u64 reg;
14542
14543 if (!rcd || !rcd->sc) {
14544 ret = -EINVAL;
14545 goto done;
14546 }
14547 sctxt = rcd->sc->hw_context;
14548 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14549 /*
14550 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14551 * This check would not have been enabled for A0 h/w, see
14552 * set_ctxt_jkey().
14553 */
Mike Marciniszyn995deaf2015-11-16 21:59:29 -050014554 if (!is_ax(dd)) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014555 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14556 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14557 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14558 }
14559 /* Turn off the J_KEY on the receive side */
14560 write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14561done:
14562 return ret;
14563}
14564
14565int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14566{
14567 struct hfi1_ctxtdata *rcd;
14568 unsigned sctxt;
14569 int ret = 0;
14570 u64 reg;
14571
Jubin Johne4909742016-02-14 20:22:00 -080014572 if (ctxt < dd->num_rcv_contexts) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014573 rcd = dd->rcd[ctxt];
Jubin Johne4909742016-02-14 20:22:00 -080014574 } else {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014575 ret = -EINVAL;
14576 goto done;
14577 }
14578 if (!rcd || !rcd->sc) {
14579 ret = -EINVAL;
14580 goto done;
14581 }
14582 sctxt = rcd->sc->hw_context;
14583 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14584 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14585 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14586 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14587 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Sebastian Sancheze38d1e42016-04-12 11:22:21 -070014588 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014589 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14590done:
14591 return ret;
14592}
14593
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014594int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014595{
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014596 u8 hw_ctxt;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014597 u64 reg;
14598
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014599 if (!ctxt || !ctxt->sc)
14600 return -EINVAL;
14601
14602 if (ctxt->ctxt >= dd->num_rcv_contexts)
14603 return -EINVAL;
14604
14605 hw_ctxt = ctxt->sc->hw_context;
14606 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014607 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
Michael J. Ruhl637a9a72017-05-04 05:15:03 -070014608 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14609 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14610
14611 return 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014612}
14613
14614/*
14615 * Start doing the clean up of the chip. Our clean up happens in multiple
14616 * stages and this is just the first.
14617 */
14618void hfi1_start_cleanup(struct hfi1_devdata *dd)
14619{
Ashutosh Dixitaffa48d2016-02-03 14:33:06 -080014620 aspm_exit(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014621 free_cntrs(dd);
14622 free_rcverr(dd);
14623 clean_up_interrupts(dd);
Dean Luicka2ee27a2016-03-05 08:49:50 -080014624 finish_chip_resources(dd);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014625}
14626
14627#define HFI_BASE_GUID(dev) \
14628 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14629
14630/*
Dean Luick78eb1292016-03-05 08:49:45 -080014631 * Information can be shared between the two HFIs on the same ASIC
14632 * in the same OS. This function finds the peer device and sets
14633 * up a shared structure.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014634 */
Dean Luick78eb1292016-03-05 08:49:45 -080014635static int init_asic_data(struct hfi1_devdata *dd)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014636{
14637 unsigned long flags;
14638 struct hfi1_devdata *tmp, *peer = NULL;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014639 struct hfi1_asic_data *asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014640 int ret = 0;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014641
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014642 /* pre-allocate the asic structure in case we are the first device */
14643 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14644 if (!asic_data)
14645 return -ENOMEM;
14646
Mike Marciniszyn77241052015-07-30 15:17:43 -040014647 spin_lock_irqsave(&hfi1_devs_lock, flags);
14648 /* Find our peer device */
14649 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14650 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14651 dd->unit != tmp->unit) {
14652 peer = tmp;
14653 break;
14654 }
14655 }
14656
Dean Luick78eb1292016-03-05 08:49:45 -080014657 if (peer) {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014658 /* use already allocated structure */
Dean Luick78eb1292016-03-05 08:49:45 -080014659 dd->asic_data = peer->asic_data;
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014660 kfree(asic_data);
Dean Luick78eb1292016-03-05 08:49:45 -080014661 } else {
Tadeusz Struk98f179a2016-07-06 17:14:47 -040014662 dd->asic_data = asic_data;
Dean Luick78eb1292016-03-05 08:49:45 -080014663 mutex_init(&dd->asic_data->asic_resource_mutex);
14664 }
14665 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014666 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
Dean Luickdba715f2016-07-06 17:28:52 -040014667
14668 /* first one through - set up i2c devices */
14669 if (!peer)
14670 ret = set_up_i2c(dd, dd->asic_data);
14671
Dean Luick78eb1292016-03-05 08:49:45 -080014672 return ret;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014673}
14674
Dean Luick5d9157a2015-11-16 21:59:34 -050014675/*
14676 * Set dd->boardname. Use a generic name if a name is not returned from
14677 * EFI variable space.
14678 *
14679 * Return 0 on success, -ENOMEM if space could not be allocated.
14680 */
14681static int obtain_boardname(struct hfi1_devdata *dd)
14682{
14683 /* generic board description */
14684 const char generic[] =
14685 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14686 unsigned long size;
14687 int ret;
14688
14689 ret = read_hfi1_efi_var(dd, "description", &size,
14690 (void **)&dd->boardname);
14691 if (ret) {
Dean Luick845f8762016-02-03 14:31:57 -080014692 dd_dev_info(dd, "Board description not found\n");
Dean Luick5d9157a2015-11-16 21:59:34 -050014693 /* use generic description */
14694 dd->boardname = kstrdup(generic, GFP_KERNEL);
14695 if (!dd->boardname)
14696 return -ENOMEM;
14697 }
14698 return 0;
14699}
14700
Kaike Wan24487dd2016-02-26 13:33:23 -080014701/*
14702 * Check the interrupt registers to make sure that they are mapped correctly.
14703 * It is intended to help the user identify any mismapping by the VMM when the driver
14704 * is running in a VM. This function should only be called before interrupt
14705 * is set up properly.
14706 *
14707 * Return 0 on success, -EINVAL on failure.
14708 */
14709static int check_int_registers(struct hfi1_devdata *dd)
14710{
14711 u64 reg;
14712 u64 all_bits = ~(u64)0;
14713 u64 mask;
14714
14715 /* Clear CceIntMask[0] to avoid raising any interrupts */
14716 mask = read_csr(dd, CCE_INT_MASK);
14717 write_csr(dd, CCE_INT_MASK, 0ull);
14718 reg = read_csr(dd, CCE_INT_MASK);
14719 if (reg)
14720 goto err_exit;
14721
14722 /* Clear all interrupt status bits */
14723 write_csr(dd, CCE_INT_CLEAR, all_bits);
14724 reg = read_csr(dd, CCE_INT_STATUS);
14725 if (reg)
14726 goto err_exit;
14727
14728 /* Set all interrupt status bits */
14729 write_csr(dd, CCE_INT_FORCE, all_bits);
14730 reg = read_csr(dd, CCE_INT_STATUS);
14731 if (reg != all_bits)
14732 goto err_exit;
14733
14734 /* Restore the interrupt mask */
14735 write_csr(dd, CCE_INT_CLEAR, all_bits);
14736 write_csr(dd, CCE_INT_MASK, mask);
14737
14738 return 0;
14739err_exit:
14740 write_csr(dd, CCE_INT_MASK, mask);
14741 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14742 return -EINVAL;
14743}
14744
Mike Marciniszyn77241052015-07-30 15:17:43 -040014745/**
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014746 * hfi1_init_dd - Allocate and initialize the device structure for the hfi.
Mike Marciniszyn77241052015-07-30 15:17:43 -040014747 * @dev: the pci_dev for hfi1_ib device
14748 * @ent: pci_device_id struct for this dev
14749 *
14750 * Also allocates, initializes, and returns the devdata struct for this
14751 * device instance
14752 *
14753 * This is global, and is called directly at init to set up the
14754 * chip-specific function pointers for later use.
14755 */
14756struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14757 const struct pci_device_id *ent)
14758{
14759 struct hfi1_devdata *dd;
14760 struct hfi1_pportdata *ppd;
14761 u64 reg;
14762 int i, ret;
14763 static const char * const inames[] = { /* implementation names */
14764 "RTL silicon",
14765 "RTL VCS simulation",
14766 "RTL FPGA emulation",
14767 "Functional simulator"
14768 };
Kaike Wan24487dd2016-02-26 13:33:23 -080014769 struct pci_dev *parent = pdev->bus->self;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014770
Jubin John17fb4f22016-02-14 20:21:52 -080014771 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14772 sizeof(struct hfi1_pportdata));
Mike Marciniszyn77241052015-07-30 15:17:43 -040014773 if (IS_ERR(dd))
14774 goto bail;
14775 ppd = dd->pport;
14776 for (i = 0; i < dd->num_pports; i++, ppd++) {
14777 int vl;
14778 /* init common fields */
14779 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14780 /* DC supports 4 link widths */
14781 ppd->link_width_supported =
14782 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14783 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14784 ppd->link_width_downgrade_supported =
14785 ppd->link_width_supported;
14786 /* start out enabling only 4X */
14787 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14788 ppd->link_width_downgrade_enabled =
14789 ppd->link_width_downgrade_supported;
14790 /* link width active is 0 when link is down */
14791 /* link width downgrade active is 0 when link is down */
14792
Jubin Johnd0d236e2016-02-14 20:20:15 -080014793 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14794 num_vls > HFI1_MAX_VLS_SUPPORTED) {
Mike Marciniszyn77241052015-07-30 15:17:43 -040014795 hfi1_early_err(&pdev->dev,
14796 "Invalid num_vls %u, using %u VLs\n",
14797 num_vls, HFI1_MAX_VLS_SUPPORTED);
14798 num_vls = HFI1_MAX_VLS_SUPPORTED;
14799 }
14800 ppd->vls_supported = num_vls;
14801 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014802 ppd->actual_vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014803 /* Set the default MTU. */
14804 for (vl = 0; vl < num_vls; vl++)
14805 dd->vld[vl].mtu = hfi1_max_mtu;
14806 dd->vld[15].mtu = MAX_MAD_PACKET;
14807 /*
14808 * Set the initial values to reasonable default, will be set
14809 * for real when link is up.
14810 */
14811 ppd->lstate = IB_PORT_DOWN;
14812 ppd->overrun_threshold = 0x4;
14813 ppd->phy_error_threshold = 0xf;
14814 ppd->port_crc_mode_enabled = link_crc_mask;
14815 /* initialize supported LTP CRC mode */
14816 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14817 /* initialize enabled LTP CRC mode */
14818 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14819 /* start in offline */
14820 ppd->host_link_state = HLS_DN_OFFLINE;
14821 init_vl_arb_caches(ppd);
Byczkowski, Jakubbec7c792017-05-29 17:21:32 -070014822 ppd->pstate = PLS_OFFLINE;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014823 }
14824
14825 dd->link_default = HLS_DN_POLL;
14826
14827 /*
14828 * Do remaining PCIe setup and save PCIe values in dd.
14829 * Any error printing is already done by the init code.
14830 * On return, we have the chip mapped.
14831 */
Easwar Hariharan26ea2542016-10-17 04:19:58 -070014832 ret = hfi1_pcie_ddinit(dd, pdev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014833 if (ret < 0)
14834 goto bail_free;
14835
14836 /* verify that reads actually work, save revision for reset check */
14837 dd->revision = read_csr(dd, CCE_REVISION);
14838 if (dd->revision == ~(u64)0) {
14839 dd_dev_err(dd, "cannot read chip CSRs\n");
14840 ret = -EINVAL;
14841 goto bail_cleanup;
14842 }
14843 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14844 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14845 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14846 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14847
Jubin John4d114fd2016-02-14 20:21:43 -080014848 /*
Kaike Wan24487dd2016-02-26 13:33:23 -080014849 * Check interrupt registers mapping if the driver has no access to
14850 * the upstream component. In this case, it is likely that the driver
14851 * is running in a VM.
14852 */
14853 if (!parent) {
14854 ret = check_int_registers(dd);
14855 if (ret)
14856 goto bail_cleanup;
14857 }
14858
14859 /*
Jubin John4d114fd2016-02-14 20:21:43 -080014860 * obtain the hardware ID - NOT related to unit, which is a
14861 * software enumeration
14862 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040014863 reg = read_csr(dd, CCE_REVISION2);
14864 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14865 & CCE_REVISION2_HFI_ID_MASK;
14866 /* the variable size will remove unwanted bits */
14867 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14868 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14869 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
Jubin John17fb4f22016-02-14 20:21:52 -080014870 dd->icode < ARRAY_SIZE(inames) ?
14871 inames[dd->icode] : "unknown", (int)dd->irev);
Mike Marciniszyn77241052015-07-30 15:17:43 -040014872
14873 /* speeds the hardware can support */
14874 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14875 /* speeds allowed to run at */
14876 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14877 /* give a reasonable active value, will be set on link up */
14878 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14879
14880 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14881 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14882 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14883 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14884 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14885 /* fix up link widths for emulation _p */
14886 ppd = dd->pport;
14887 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14888 ppd->link_width_supported =
14889 ppd->link_width_enabled =
14890 ppd->link_width_downgrade_supported =
14891 ppd->link_width_downgrade_enabled =
14892 OPA_LINK_WIDTH_1X;
14893 }
14894 /* ensure num_vls isn't larger than the number of SDMA engines */
14895 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14896 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
Dean Luick11a59092015-12-01 15:38:18 -050014897 num_vls, dd->chip_sdma_engines);
14898 num_vls = dd->chip_sdma_engines;
14899 ppd->vls_supported = dd->chip_sdma_engines;
Mike Marciniszyn8a4d3442016-02-14 12:46:01 -080014900 ppd->vls_operational = ppd->vls_supported;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014901 }
14902
14903 /*
14904 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14905 * Limit the max if larger than the field holds. If timeout is
14906 * non-zero, then the calculated field will be at least 1.
14907 *
14908 * Must be after icode is set up - the cclock rate depends
14909 * on knowing the hardware being used.
14910 */
14911 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14912 if (dd->rcv_intr_timeout_csr >
14913 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14914 dd->rcv_intr_timeout_csr =
14915 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14916 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14917 dd->rcv_intr_timeout_csr = 1;
14918
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014919 /* needs to be done before we look for the peer device */
14920 read_guid(dd);
14921
Dean Luick78eb1292016-03-05 08:49:45 -080014922 /* set up shared ASIC data with peer device */
14923 ret = init_asic_data(dd);
14924 if (ret)
14925 goto bail_cleanup;
Easwar Hariharan7c03ed82015-10-26 10:28:28 -040014926
Mike Marciniszyn77241052015-07-30 15:17:43 -040014927 /* obtain chip sizes, reset chip CSRs */
14928 init_chip(dd);
14929
14930 /* read in the PCIe link speed information */
14931 ret = pcie_speeds(dd);
14932 if (ret)
14933 goto bail_cleanup;
14934
Dean Luicke83eba22016-09-30 04:41:45 -070014935 /* call before get_platform_config(), after init_chip_resources() */
14936 ret = eprom_init(dd);
14937 if (ret)
14938 goto bail_free_rcverr;
14939
Easwar Hariharanc3838b32016-02-09 14:29:13 -080014940 /* Needs to be called before hfi1_firmware_init */
14941 get_platform_config(dd);
14942
Mike Marciniszyn77241052015-07-30 15:17:43 -040014943 /* read in firmware */
14944 ret = hfi1_firmware_init(dd);
14945 if (ret)
14946 goto bail_cleanup;
14947
14948 /*
14949 * In general, the PCIe Gen3 transition must occur after the
14950 * chip has been idled (so it won't initiate any PCIe transactions
14951 * e.g. an interrupt) and before the driver changes any registers
14952 * (the transition will reset the registers).
14953 *
14954 * In particular, place this call after:
14955 * - init_chip() - the chip will not initiate any PCIe transactions
14956 * - pcie_speeds() - reads the current link speed
14957 * - hfi1_firmware_init() - the needed firmware is ready to be
14958 * downloaded
14959 */
14960 ret = do_pcie_gen3_transition(dd);
14961 if (ret)
14962 goto bail_cleanup;
14963
14964 /* start setting dd values and adjusting CSRs */
14965 init_early_variables(dd);
14966
14967 parse_platform_config(dd);
14968
Dean Luick5d9157a2015-11-16 21:59:34 -050014969 ret = obtain_boardname(dd);
14970 if (ret)
Mike Marciniszyn77241052015-07-30 15:17:43 -040014971 goto bail_cleanup;
Mike Marciniszyn77241052015-07-30 15:17:43 -040014972
14973 snprintf(dd->boardversion, BOARD_VERS_MAX,
Dean Luick5d9157a2015-11-16 21:59:34 -050014974 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
Mike Marciniszyn77241052015-07-30 15:17:43 -040014975 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
Mike Marciniszyn77241052015-07-30 15:17:43 -040014976 (u32)dd->majrev,
14977 (u32)dd->minrev,
14978 (dd->revision >> CCE_REVISION_SW_SHIFT)
14979 & CCE_REVISION_SW_MASK);
14980
14981 ret = set_up_context_variables(dd);
14982 if (ret)
14983 goto bail_cleanup;
14984
14985 /* set initial RXE CSRs */
14986 init_rxe(dd);
14987 /* set initial TXE CSRs */
14988 init_txe(dd);
14989 /* set initial non-RXE, non-TXE CSRs */
14990 init_other(dd);
14991 /* set up KDETH QP prefix in both RX and TX CSRs */
14992 init_kdeth_qp(dd);
14993
Dennis Dalessandro41973442016-07-25 07:52:36 -070014994 ret = hfi1_dev_affinity_init(dd);
14995 if (ret)
14996 goto bail_cleanup;
Mitko Haralanov957558c2016-02-03 14:33:40 -080014997
Mike Marciniszyn77241052015-07-30 15:17:43 -040014998 /* send contexts must be set up before receive contexts */
14999 ret = init_send_contexts(dd);
15000 if (ret)
15001 goto bail_cleanup;
15002
15003 ret = hfi1_create_ctxts(dd);
15004 if (ret)
15005 goto bail_cleanup;
15006
15007 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15008 /*
15009 * rcd[0] is guaranteed to be valid by this point. Also, all
15010	 * contexts are using the same value, as per the module parameter.
15011 */
15012 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
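	/*
	 * Illustrative note: sizeof(u64) / sizeof(u32) == 2, so rhf_offset
	 * is rcvhdrqentsize - 2 dwords, i.e. the 8-byte RHF occupies the
	 * last two dwords of each receive header queue entry.  For a
	 * 32-dword entry, for example, the RHF starts at dword 30.
	 */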
15013
15014 ret = init_pervl_scs(dd);
15015 if (ret)
15016 goto bail_cleanup;
15017
15018 /* sdma init */
15019 for (i = 0; i < dd->num_pports; ++i) {
15020 ret = sdma_init(dd, i);
15021 if (ret)
15022 goto bail_cleanup;
15023 }
15024
15025 /* use contexts created by hfi1_create_ctxts */
15026 ret = set_up_interrupts(dd);
15027 if (ret)
15028 goto bail_cleanup;
15029
15030 /* set up LCB access - must be after set_up_interrupts() */
15031 init_lcb_access(dd);
15032
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015033 /*
15034 * Serial number is created from the base guid:
15035 * [27:24] = base guid [38:35]
15036 * [23: 0] = base guid [23: 0]
15037 */
Mike Marciniszyn77241052015-07-30 15:17:43 -040015038 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
Ira Weinyfc0b76c2016-07-27 21:09:40 -040015039 (dd->base_guid & 0xFFFFFF) |
15040 ((dd->base_guid >> 11) & 0xF000000));
Mike Marciniszyn77241052015-07-30 15:17:43 -040015041
15042 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15043 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15044 dd->oui3 = dd->base_guid >> 40 & 0xFF;
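	/*
	 * Worked example (hypothetical GUID, illustration only): for a
	 * base_guid of 0x0011750178ABCDEF the OUI bytes are 00:11:75,
	 * guid[23:0] is 0xABCDEF and guid[38:35] is 0xF, so the serial
	 * number string becomes "0x0fabcdef".
	 */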
15045
15046 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15047 if (ret)
15048 goto bail_clear_intr;
Mike Marciniszyn77241052015-07-30 15:17:43 -040015049
15050 thermal_init(dd);
15051
15052 ret = init_cntrs(dd);
15053 if (ret)
15054 goto bail_clear_intr;
15055
15056 ret = init_rcverr(dd);
15057 if (ret)
15058 goto bail_free_cntrs;
15059
Tadeusz Strukacd7c8f2016-10-25 08:57:55 -070015060 init_completion(&dd->user_comp);
15061
15062	/* The user refcount starts with one to indicate an active device */
15063 atomic_set(&dd->user_refcount, 1);
15064
Mike Marciniszyn77241052015-07-30 15:17:43 -040015065 goto bail;
15066
15067bail_free_rcverr:
15068 free_rcverr(dd);
15069bail_free_cntrs:
15070 free_cntrs(dd);
15071bail_clear_intr:
15072 clean_up_interrupts(dd);
15073bail_cleanup:
15074 hfi1_pcie_ddcleanup(dd);
15075bail_free:
15076 hfi1_free_devdata(dd);
15077 dd = ERR_PTR(ret);
15078bail:
15079 return dd;
15080}
15081
15082static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15083 u32 dw_len)
15084{
15085 u32 delta_cycles;
15086 u32 current_egress_rate = ppd->current_egress_rate;
15087 /* rates here are in units of 10^6 bits/sec */
15088
15089 if (desired_egress_rate == -1)
15090 return 0; /* shouldn't happen */
15091
15092 if (desired_egress_rate >= current_egress_rate)
15093		return 0; /* we can't make it go faster, only slower */
15094
15095 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15096 egress_cycles(dw_len * 4, current_egress_rate);
15097
15098 return (u16)delta_cycles;
15099}
15100
Mike Marciniszyn77241052015-07-30 15:17:43 -040015101/**
15102 * create_pbc - build a pbc for transmission
 * @ppd: port data
15103 * @flags: special case flags or-ed in built pbc
15104 * @srate_mbs: static rate, in Mb/s
15105 * @vl: vl
15106 * @dw_len: dword length (header words + data words + pbc words)
15107 *
15108 * Create a PBC with the given flags, rate, VL, and length.
15109 *
15110 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15111 * for verbs, which does not use this PSM feature. The lone other caller
15112 * is for the diagnostic interface which calls this if the user does not
15113 * supply their own PBC.
15114 */
15115u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15116 u32 dw_len)
15117{
15118 u64 pbc, delay = 0;
15119
15120 if (unlikely(srate_mbs))
15121 delay = delay_cycles(ppd, srate_mbs, dw_len);
15122
15123 pbc = flags
15124 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15125 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15126 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15127 | (dw_len & PBC_LENGTH_DWS_MASK)
15128 << PBC_LENGTH_DWS_SHIFT;
15129
15130 return pbc;
15131}
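/*
 * Illustrative sketch only (not part of the driver): a hypothetical caller
 * building a PBC for a packet on VL 0 with no static rate pacing.  The
 * helper name and the 2-dword PBC accounting shown here are assumptions
 * for illustration; real callers (verbs, diag) supply their own flags and
 * length accounting.
 */
static u64 __maybe_unused example_build_pbc(struct hfi1_pportdata *ppd,
					    u32 hdr_dwords, u32 data_dwords)
{
	/* total length in dwords covers the PBC itself plus header + data */
	u32 dw_len = 2 + hdr_dwords + data_dwords;

	/* srate_mbs == 0 => delay_cycles() is skipped, no pacing delay */
	return create_pbc(ppd, 0, 0, 0, dw_len);
}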
15132
15133#define SBUS_THERMAL 0x4f
15134#define SBUS_THERM_MONITOR_MODE 0x1
15135
15136#define THERM_FAILURE(dev, ret, reason) \
15137 dd_dev_err((dd), \
15138 "Thermal sensor initialization failed: %s (%d)\n", \
15139 (reason), (ret))
15140
15141/*
Jakub Pawlakcde10af2016-05-12 10:23:35 -070015142 * Initialize the thermal sensor.
Mike Marciniszyn77241052015-07-30 15:17:43 -040015143 *
15144	 * After initialization, enable polling of the thermal sensor through
15145	 * the SBus interface.  For this to work, the SBus Master firmware
15146	 * must be loaded, because the HW polling logic uses SBus interrupts,
15147	 * which the default firmware does not support.  Otherwise, no
15148	 * data will be returned through
15149 * the ASIC_STS_THERM CSR.
15150 */
15151static int thermal_init(struct hfi1_devdata *dd)
15152{
15153 int ret = 0;
15154
15155 if (dd->icode != ICODE_RTL_SILICON ||
Dean Luicka4536982016-03-05 08:50:11 -080015156 check_chip_resource(dd, CR_THERM_INIT, NULL))
Mike Marciniszyn77241052015-07-30 15:17:43 -040015157 return ret;
15158
Dean Luick576531f2016-03-05 08:50:01 -080015159 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15160 if (ret) {
15161 THERM_FAILURE(dd, ret, "Acquire SBus");
15162 return ret;
15163 }
15164
Mike Marciniszyn77241052015-07-30 15:17:43 -040015165 dd_dev_info(dd, "Initializing thermal sensor\n");
Jareer Abdel-Qader4ef98982015-11-06 20:07:00 -050015166 /* Disable polling of thermal readings */
15167 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15168 msleep(100);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015169 /* Thermal Sensor Initialization */
15170 /* Step 1: Reset the Thermal SBus Receiver */
15171 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15172 RESET_SBUS_RECEIVER, 0);
15173 if (ret) {
15174 THERM_FAILURE(dd, ret, "Bus Reset");
15175 goto done;
15176 }
15177 /* Step 2: Set Reset bit in Thermal block */
15178 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15179 WRITE_SBUS_RECEIVER, 0x1);
15180 if (ret) {
15181 THERM_FAILURE(dd, ret, "Therm Block Reset");
15182 goto done;
15183 }
15184 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15185 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15186 WRITE_SBUS_RECEIVER, 0x32);
15187 if (ret) {
15188 THERM_FAILURE(dd, ret, "Write Clock Div");
15189 goto done;
15190 }
15191 /* Step 4: Select temperature mode */
15192 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15193 WRITE_SBUS_RECEIVER,
15194 SBUS_THERM_MONITOR_MODE);
15195 if (ret) {
15196 THERM_FAILURE(dd, ret, "Write Mode Sel");
15197 goto done;
15198 }
15199 /* Step 5: De-assert block reset and start conversion */
15200 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15201 WRITE_SBUS_RECEIVER, 0x2);
15202 if (ret) {
15203 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15204 goto done;
15205 }
15206 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15207 msleep(22);
15208
15209 /* Enable polling of thermal readings */
15210 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
Dean Luicka4536982016-03-05 08:50:11 -080015211
15212 /* Set initialized flag */
15213 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15214 if (ret)
15215 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15216
Mike Marciniszyn77241052015-07-30 15:17:43 -040015217done:
Dean Luick576531f2016-03-05 08:50:01 -080015218 release_chip_resource(dd, CR_SBUS);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015219 return ret;
15220}
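/*
 * Illustrative sketch only: once thermal_init() has enabled polling, the
 * current reading can be sampled from the ASIC_STS_THERM CSR.  The
 * CURR_TEMP shift/mask names below are assumptions made for the sake of
 * this example; the authoritative field layout lives in chip_registers.h.
 */
static inline u32 __maybe_unused example_read_curr_temp(struct hfi1_devdata *dd)
{
	u64 reg = read_csr(dd, ASIC_STS_THERM);

	return (u32)((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
		     ASIC_STS_THERM_CURR_TEMP_MASK);
}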
15221
15222static void handle_temp_err(struct hfi1_devdata *dd)
15223{
15224 struct hfi1_pportdata *ppd = &dd->pport[0];
15225 /*
15226 * Thermal Critical Interrupt
15227 * Put the device into forced freeze mode, take link down to
15228 * offline, and put DC into reset.
15229 */
15230 dd_dev_emerg(dd,
15231 "Critical temperature reached! Forcing device into freeze mode!\n");
15232 dd->flags |= HFI1_FORCED_FREEZE;
Jubin John8638b772016-02-14 20:19:24 -080015233 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015234 /*
15235 * Shut DC down as much and as quickly as possible.
15236 *
15237 * Step 1: Take the link down to OFFLINE. This will cause the
15238 * 8051 to put the Serdes in reset. However, we don't want to
15239 * go through the entire link state machine since we want to
15240 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15241 * but rather an attempt to save the chip.
15242 * Code below is almost the same as quiet_serdes() but avoids
15243 * all the extra work and the sleeps.
15244 */
15245 ppd->driver_link_ready = 0;
15246 ppd->link_enabled = 0;
Harish Chegondibf640092016-03-05 08:49:29 -080015247 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15248 PLS_OFFLINE);
Mike Marciniszyn77241052015-07-30 15:17:43 -040015249 /*
15250 * Step 2: Shutdown LCB and 8051
15251 * After shutdown, do not restore DC_CFG_RESET value.
15252 */
15253 dc_shutdown(dd);
15254}